/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

#include <security/mac/mac_framework.h>

/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, the
 * next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (i.e., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

FEATURE(ktrace, "Kernel support for system-call tracing");

#ifndef KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct ktr_header ktr_header;
	void *ktr_buffer;
	union {
		struct ktr_proc_ctor ktr_proc_ctor;
		struct ktr_cap_fail ktr_cap_fail;
		struct ktr_syscall ktr_syscall;
		struct ktr_sysret ktr_sysret;
		struct ktr_genio ktr_genio;
		struct ktr_psig ktr_psig;
		struct ktr_csw ktr_csw;
		struct ktr_fault ktr_fault;
		struct ktr_faultend ktr_faultend;
		struct ktr_struct_array ktr_struct_array;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};

static const int data_lengths[] = {
	[KTR_SYSCALL] = offsetof(struct ktr_syscall, ktr_args),
	[KTR_SYSRET] = sizeof(struct ktr_sysret),
	[KTR_NAMEI] = 0,
	[KTR_GENIO] = sizeof(struct ktr_genio),
	[KTR_PSIG] = sizeof(struct ktr_psig),
	[KTR_CSW] = sizeof(struct ktr_csw),
	[KTR_USER] = 0,
	[KTR_STRUCT] = 0,
	[KTR_SYSCTL] = 0,
	[KTR_PROCCTOR] = sizeof(struct ktr_proc_ctor),
	[KTR_PROCDTOR] = 0,
	[KTR_CAPFAIL] = sizeof(struct ktr_cap_fail),
	[KTR_FAULT] = sizeof(struct ktr_fault),
	[KTR_FAULTEND] = sizeof(struct ktr_faultend),
	[KTR_STRUCT_ARRAY] = sizeof(struct ktr_struct_array),
};

static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

u_int ktr_geniosize = PAGE_SIZE;
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RWTUN, &ktr_geniosize,
    0, "Maximum size of genio event payload");

/*
 * Allow not sending a signal to the traced process, in whose context the
 * ktr record is written.  The limit is applied from the process that
 * set up ktrace, so killing the traced process is not completely fair.
 */
int ktr_filesize_limit_signal = 0;
SYSCTL_INT(_kern_ktrace, OID_AUTO, filesize_limit_signal, CTLFLAG_RWTUN,
    &ktr_filesize_limit_signal, 0,
    "Send SIGXFSZ to the traced process when the log size limit is exceeded");

static int print_message = 1;
static struct mtx ktrace_mtx;
static struct sx ktrace_sx;

struct ktr_io_params {
	struct vnode *vp;
	struct ucred *cr;
	off_t lim;
	u_int refs;
};

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int oldsize, u_int newsize);
static struct ktr_request *ktr_getrequest_entered(struct thread *td, int type);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static struct ktr_io_params *ktr_freeproc(struct proc *p);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_freerequest_locked(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *,struct proc *);
static int ktrsetchildren(struct thread *, struct proc *, int, int,
    struct ktr_io_params *);
static int ktrops(struct thread *, struct proc *, int, int,
    struct ktr_io_params *);
static void ktrprocctor_entered(struct thread *, struct proc *);

/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}

static void
ast_ktrace(struct thread *td, int tda __unused)
{
	KTRUSERRET(td);
}

static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK |
		    M_ZERO);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
	ast_register(TDA_KTRACE, ASTR_ASTF_REQUIRED, 0, ast_ktrace);
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		oldsize = ktr_requestpool;
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	ktrace_enter(td);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(oldsize, wantsize);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ktr_requestpool, 0,
    sysctl_kern_ktrace_request_pool, "IU",
    "Pool buffer size for ktrace(1)");

static u_int
ktrace_resize_pool(u_int oldsize, u_int newsize)
{
	STAILQ_HEAD(, ktr_request) ktr_new;
	struct ktr_request *req;
	int bound;

	print_message = 1;
	bound = newsize - oldsize;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0) {
		mtx_lock(&ktrace_mtx);
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				break;
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			free(req, M_KTRACE);
		}
	} else {
		/* Grow pool up to newsize. */
		STAILQ_INIT(&ktr_new);
		while (bound-- > 0) {
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK | M_ZERO);
			STAILQ_INSERT_HEAD(&ktr_new, req, ktr_list);
		}
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&ktr_free, &ktr_new);
		ktr_requestpool += (newsize - oldsize);
	}
	mtx_unlock(&ktrace_mtx);
	return (ktr_requestpool);
}

/* ktr_getrequest() assumes that ktr_comm[] is the same size as td_name[]. */
CTASSERT(sizeof(((struct ktr_header *)NULL)->ktr_comm) ==
    (sizeof((struct thread *)NULL)->td_name));

static struct ktr_request *
ktr_getrequest_entered(struct thread *td, int type)
{
	struct ktr_request *req;
	struct proc *p = td->td_proc;
	int pm;

	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		nanotime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_type |= KTR_VERSIONED;
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		req->ktr_header.ktr_cpu = PCPU_GET(cpuid);
		req->ktr_header.ktr_version = KTR_VERSION1;
		bcopy(td->td_name, req->ktr_header.ktr_comm,
		    sizeof(req->ktr_header.ktr_comm));
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
	}
	return (req);
}

static struct ktr_request *
ktr_getrequest(int type)
{
	struct thread *td = curthread;
	struct ktr_request *req;

	ktrace_enter(td);
	req = ktr_getrequest_entered(td, type);
	if (req == NULL)
		ktrace_exit(td);

	return (req);
}

/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under these
 * circumstances, queue a request to the thread to be written asynchronously
 * later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
	ast_sched(td, TDA_KTRACE);
}

/*
 * Drain any pending ktrace records from the per-thread queue to disk.  This
 * is used both internally before committing other records, and also on
 * system call return.  We drain all the ones we can find at the time when
 * drain is requested, but don't keep draining after that as those events
 * may be approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}

/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

static void
ktr_freerequest(struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	ktr_freerequest_locked(req);
	mtx_unlock(&ktrace_mtx);
}

static void
ktr_freerequest_locked(struct ktr_request *req)
{

	mtx_assert(&ktrace_mtx, MA_OWNED);
	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
}

static void
ktr_io_params_ref(struct ktr_io_params *kiop)
{
	mtx_assert(&ktrace_mtx, MA_OWNED);
	kiop->refs++;
}

static struct ktr_io_params *
ktr_io_params_rele(struct ktr_io_params *kiop)
{
	mtx_assert(&ktrace_mtx, MA_OWNED);
	if (kiop == NULL)
		return (NULL);
	KASSERT(kiop->refs > 0, ("kiop ref == 0 %p", kiop));
	return (--(kiop->refs) == 0 ? kiop : NULL);
}

void
ktr_io_params_free(struct ktr_io_params *kiop)
{
	if (kiop == NULL)
		return;

	MPASS(kiop->refs == 0);
	vn_close(kiop->vp, FWRITE, kiop->cr, curthread);
	crfree(kiop->cr);
	free(kiop, M_KTRACE);
}

static struct ktr_io_params *
ktr_io_params_alloc(struct thread *td, struct vnode *vp)
{
	struct ktr_io_params *res;

	res = malloc(sizeof(struct ktr_io_params), M_KTRACE, M_WAITOK);
	res->vp = vp;
	res->cr = crhold(td->td_ucred);
	res->lim = lim_cur(td, RLIMIT_FSIZE);
	res->refs = 1;
	return (res);
}

/*
 * Disable tracing for a process and release all associated resources.
 * The caller is responsible for releasing a reference on the returned
 * vnode and credentials.
 */
static struct ktr_io_params *
ktr_freeproc(struct proc *p)
{
	struct ktr_io_params *kiop;
	struct ktr_request *req;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&ktrace_mtx, MA_OWNED);
	kiop = ktr_io_params_rele(p->p_ktrioparms);
	p->p_ktrioparms = NULL;
	p->p_traceflag = 0;
	while ((req = STAILQ_FIRST(&p->p_ktr)) != NULL) {
		STAILQ_REMOVE_HEAD(&p->p_ktr, ktr_list);
		ktr_freerequest_locked(req);
	}
	return (kiop);
}

struct vnode *
ktr_get_tracevp(struct proc *p, bool ref)
{
	struct vnode *vp;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (p->p_ktrioparms != NULL) {
		vp = p->p_ktrioparms->vp;
		if (ref)
			vrefact(vp);
	} else {
		vp = NULL;
	}
	return (vp);
}

void
ktrsyscall(int code, int narg, syscallarg_t args[])
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

void
ktrsysret(int code, int error, register_t retval)
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = ((error == 0) ? retval: 0);	/* what about val2 ? */
	ktr_submitrequest(curthread, req);
}

/*
 * When a setuid process execs, disable tracing.
 *
 * XXX: We toss any pending asynchronous records.
 */
struct ktr_io_params *
ktrprocexec(struct proc *p)
{
	struct ktr_io_params *kiop;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	kiop = p->p_ktrioparms;
	if (kiop == NULL || priv_check_cred(kiop->cr, PRIV_DEBUG_DIFFCRED))
		return (NULL);

	mtx_lock(&ktrace_mtx);
	kiop = ktr_freeproc(p);
	mtx_unlock(&ktrace_mtx);
	return (kiop);
}

/*
 * When a process exits, drain per-process asynchronous trace records
 * and disable tracing.
 */
void
ktrprocexit(struct thread *td)
{
	struct ktr_request *req;
	struct proc *p;
	struct ktr_io_params *kiop;

	p = td->td_proc;
	if (p->p_traceflag == 0)
		return;

	ktrace_enter(td);
	req = ktr_getrequest_entered(td, KTR_PROCDTOR);
	if (req != NULL)
		ktr_enqueuerequest(td, req);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	kiop = ktr_freeproc(p);
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	ktr_io_params_free(kiop);
	ktrace_exit(td);
}

static void
ktrprocctor_entered(struct thread *td, struct proc *p)
{
	struct ktr_proc_ctor *ktp;
	struct ktr_request *req;
	struct thread *td2;

	ktrace_assert(td);
	td2 = FIRST_THREAD_IN_PROC(p);
	req = ktr_getrequest_entered(td2, KTR_PROCCTOR);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_proc_ctor;
	ktp->sv_flags = p->p_sysent->sv_flags;
	ktr_enqueuerequest(td2, req);
}

void
ktrprocctor(struct proc *p)
{
	struct thread *td = curthread;

	if ((p->p_traceflag & KTRFAC_MASK) == 0)
		return;

	ktrace_enter(td);
	ktrprocctor_entered(td, p);
	ktrace_exit(td);
}

/*
 * When a process forks, enable tracing in the new process if needed.
 */
void
ktrprocfork(struct proc *p1, struct proc *p2)
{

	MPASS(p2->p_ktrioparms == NULL);
	MPASS(p2->p_traceflag == 0);

	if (p1->p_traceflag == 0)
		return;

	PROC_LOCK(p1);
	mtx_lock(&ktrace_mtx);
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_ktrioparms = p1->p_ktrioparms) != NULL)
			p1->p_ktrioparms->refs++;
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p1);

	ktrprocctor(p2);
}

/*
 * When a thread returns, drain any asynchronous records generated by the
 * system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

void
ktrnamei(const char *path)
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

void
ktrsysctl(int *name, u_int namelen)
{
	struct ktr_request *req;
	u_int mib[CTL_MAXNAME + 2];
	char *mibname;
	size_t mibnamelen;
	int error;

	/* Lookup name of mib. */
	KASSERT(namelen <= CTL_MAXNAME, ("sysctl MIB too long"));
	mib[0] = 0;
	mib[1] = 1;
	bcopy(name, mib + 2, namelen * sizeof(*name));
	mibnamelen = 128;
	mibname = malloc(mibnamelen, M_KTRACE, M_WAITOK);
	error = kernel_sysctl(curthread, mib, namelen + 2, mibname, &mibnamelen,
	    NULL, 0, &mibnamelen, 0);
	if (error) {
		free(mibname, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_SYSCTL);
	if (req == NULL) {
		free(mibname, M_KTRACE);
		return;
	}
	req->ktr_header.ktr_len = mibnamelen;
	req->ktr_buffer = mibname;
	ktr_submitrequest(curthread, req);
}

void
ktrgenio(int fd, enum uio_rw rw, struct uio *uio, int error)
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error != 0 && (rw == UIO_READ || error == EFAULT)) {
		freeuio(uio);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = MIN(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	freeuio(uio);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}

void
ktrpsig(int sig, sig_t action, sigset_t *mask, int code)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_psig *kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrcsw(int out, int user, const char *wmesg)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_csw *kc;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	if (wmesg != NULL)
		strlcpy(kc->wmesg, wmesg, sizeof(kc->wmesg));
	else
		bzero(kc->wmesg, sizeof(kc->wmesg));
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrstruct(const char *name, const void *data, size_t datalen)
{
	struct ktr_request *req;
	char *buf;
	size_t buflen, namelen;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	if (data == NULL)
		datalen = 0;
	namelen = strlen(name) + 1;
	buflen = namelen + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	strcpy(buf, name);
	bcopy(data, buf + namelen, datalen);
	if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}

void
ktrstruct_error(const char *name, const void *data, size_t datalen, int error)
{

	if (error == 0)
		ktrstruct(name, data, datalen);
}

void
ktrstructarray(const char *name, enum uio_seg seg, const void *data,
    int num_items, size_t struct_size)
{
	struct ktr_request *req;
	struct ktr_struct_array *ksa;
	char *buf;
	size_t buflen, datalen, namelen;
	int max_items;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;
	if (num_items < 0)
		return;

	/* Trim array length to genio size. */
	max_items = ktr_geniosize / struct_size;
	if (num_items > max_items) {
		if (max_items == 0)
			num_items = 1;
		else
			num_items = max_items;
	}
	datalen = num_items * struct_size;

	if (data == NULL)
		datalen = 0;

	namelen = strlen(name) + 1;
	buflen = namelen + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	strcpy(buf, name);
	if (seg == UIO_SYSSPACE)
		bcopy(data, buf + namelen, datalen);
	else {
		if (copyin(data, buf + namelen, datalen) != 0) {
			free(buf, M_KTRACE);
			return;
		}
	}
	if ((req = ktr_getrequest(KTR_STRUCT_ARRAY)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ksa = &req->ktr_data.ktr_struct_array;
	ksa->struct_size = struct_size;
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}

void
ktrcapfail(enum ktr_cap_violation type, const void *data)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_cap_fail *kcf;
	union ktr_cap_data *kcd;

	if (__predict_false(td->td_pflags & TDP_INKTRACE))
		return;
	if (type != CAPFAIL_SYSCALL &&
	    (td->td_sa.callp->sy_flags & SYF_CAPENABLED) == 0)
		return;

	req = ktr_getrequest(KTR_CAPFAIL);
	if (req == NULL)
		return;
	kcf = &req->ktr_data.ktr_cap_fail;
	kcf->cap_type = type;
	kcf->cap_code = td->td_sa.code;
	kcf->cap_svflags = td->td_proc->p_sysent->sv_flags;
	if (data != NULL) {
		kcd = &kcf->cap_data;
		switch (type) {
		case CAPFAIL_NOTCAPABLE:
		case CAPFAIL_INCREASE:
			kcd->cap_needed = *(const cap_rights_t *)data;
			kcd->cap_held = *((const cap_rights_t *)data + 1);
			break;
		case CAPFAIL_SYSCALL:
		case CAPFAIL_SIGNAL:
		case CAPFAIL_PROTO:
			kcd->cap_int = *(const int *)data;
			break;
		case CAPFAIL_SOCKADDR:
			kcd->cap_sockaddr = *(const struct sockaddr *)data;
			break;
		case CAPFAIL_NAMEI:
			strlcpy(kcd->cap_path, data, MAXPATHLEN);
			break;
		case CAPFAIL_CPUSET:
		default:
			break;
		}
	}
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrfault(vm_offset_t vaddr, int type)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_fault *kf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_FAULT);
	if (req == NULL)
		return;
	kf = &req->ktr_data.ktr_fault;
	kf->vaddr = vaddr;
	kf->type = type;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrfaultend(int result)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_faultend *kf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_FAULTEND);
	if (req == NULL)
		return;
	kf = &req->ktr_data.ktr_faultend;
	kf->result = result;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}
#endif /* KTRACE */

/* Interface and common routines */

#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
sys_ktrace(struct thread *td, struct ktrace_args *uap)
{
#ifdef KTRACE
	struct vnode *vp = NULL;
	struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int flags, error = 0;
	struct nameidata nd;
	struct ktr_io_params *kiop, *old_kiop;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	kiop = NULL;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		error = vn_open(&nd, &flags, 0, NULL);
		if (error)
			return (error);
		NDFREE_PNBUF(&nd);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp);
		if (vp->v_type != VREG) {
			(void)vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			return (EACCES);
		}
		kiop = ktr_io_params_alloc(td, vp);
	}

	/*
	 * Clear all uses of the tracefile.
	 */
	ktrace_enter(td);
	if (ops == KTROP_CLEARFILE) {
restart:
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			old_kiop = NULL;
			PROC_LOCK(p);
			if (p->p_ktrioparms != NULL &&
			    p->p_ktrioparms->vp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					old_kiop = ktr_freeproc(p);
					mtx_unlock(&ktrace_mtx);
				} else
					error = EPERM;
			}
			PROC_UNLOCK(p);
			if (old_kiop != NULL) {
				sx_sunlock(&allproc_lock);
				ktr_io_params_free(old_kiop);
				goto restart;
			}
		}
		sx_sunlock(&allproc_lock);
		goto done;
	}
	/*
	 * do it
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}

		/*
		 * ktrops() may call vrele(). Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		if (LIST_EMPTY(&pg->pg_members)) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, kiop);
			else
				ret |= ktrops(td, p, ops, facs, kiop);
		}
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, kiop);
		else
			ret |= ktrops(td, p, ops, facs, kiop);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (kiop != NULL) {
		mtx_lock(&ktrace_mtx);
		kiop = ktr_io_params_rele(kiop);
		mtx_unlock(&ktrace_mtx);
		ktr_io_params_free(kiop);
	}
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/* ARGSUSED */
int
sys_utrace(struct thread *td, struct utrace_args *uap)
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

#ifdef KTRACE
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs,
    struct ktr_io_params *new_kiop)
{
	struct ktr_io_params *old_kiop;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	if ((ops == KTROP_SET && p->p_state == PRS_NEW) ||
	    p_cansee(td, p) != 0) {
		/*
		 * Disallow setting trace points if the process is being born.
		 * This avoids races with trace point inheritance in
		 * ktrprocfork().
		 */
		PROC_UNLOCK(p);
		return (0);
	}
	if ((p->p_flag & P_WEXIT) != 0) {
		/*
		 * There's nothing to do if the process is exiting, but avoid
		 * signaling an error.
		 */
		PROC_UNLOCK(p);
		return (1);
	}
	old_kiop = NULL;
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_ktrioparms != NULL &&
		    p->p_ktrioparms->vp != new_kiop->vp) {
			/* if trace file already in use, relinquish below */
			old_kiop = ktr_io_params_rele(p->p_ktrioparms);
			p->p_ktrioparms = NULL;
		}
		if (p->p_ktrioparms == NULL) {
			p->p_ktrioparms = new_kiop;
			ktr_io_params_ref(new_kiop);
		}
		p->p_traceflag |= facs;
		if (priv_check(td, PRIV_KTRACE) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0)
			/* no more tracing */
			old_kiop = ktr_freeproc(p);
	}
	mtx_unlock(&ktrace_mtx);
	if ((p->p_traceflag & KTRFAC_MASK) != 0)
		ktrprocctor_entered(td, p);
	PROC_UNLOCK(p);
	ktr_io_params_free(old_kiop);

	return (1);
}

static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
    struct ktr_io_params *new_kiop)
{
	struct proc *p;
	int ret = 0;

	p = top;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, new_kiop);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
	/*NOTREACHED*/
}

static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_io_params *kiop, *kiop1;
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	off_t lim;
	int datalen, buflen;
	int error;

	p = td->td_proc;

	/*
	 * We reference the kiop for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 */
	mtx_lock(&ktrace_mtx);
	kiop = p->p_ktrioparms;

	/*
	 * If kiop is NULL, it has been cleared out from under this
	 * request, so just drop it.
	 */
	if (kiop == NULL) {
		mtx_unlock(&ktrace_mtx);
		return;
	}

	ktr_io_params_ref(kiop);
	vp = kiop->vp;
	cred = kiop->cr;
	lim = kiop->lim;

	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));
	mtx_unlock(&ktrace_mtx);

	kth = &req->ktr_header;
	KASSERT(((u_short)kth->ktr_type & ~KTR_TYPE) < nitems(data_lengths),
	    ("data_lengths array overflow"));
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_TYPE];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	td->td_ktr_io_lim = lim;
#ifdef MAC
	error = mac_vnode_check_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp);
	vn_finished_write(mp);
	if (error == 0) {
		mtx_lock(&ktrace_mtx);
		kiop = ktr_io_params_rele(kiop);
		mtx_unlock(&ktrace_mtx);
		ktr_io_params_free(kiop);
		return;
	}

	/*
	 * If error encountered, give up tracing on this vnode on this
	 * process.  Other processes might still be suitable for
	 * writes to this vnode.
	 */
	log(LOG_NOTICE,
	    "ktrace write failed, errno %d, tracing stopped for pid %d\n",
	    error, p->p_pid);

	kiop1 = NULL;
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	if (p->p_ktrioparms != NULL && p->p_ktrioparms->vp == vp)
		kiop1 = ktr_freeproc(p);
	kiop = ktr_io_params_rele(kiop);
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	ktr_io_params_free(kiop1);
	ktr_io_params_free(kiop);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(struct thread *td, struct proc *targetp)
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    priv_check(td, PRIV_KTRACE))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */
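/*
 * Illustrative usage (comment only, not compiled as part of this file):
 * a minimal userland sketch of driving the facility above, entering the
 * kernel through sys_ktrace() and sys_utrace().  It assumes the standard
 * ktrace(2)/utrace(2) prototypes from <sys/ktrace.h>, a pre-existing
 * writable trace file named "ktrace.out" (this hypothetical name is not
 * created by the sketch, since sys_ktrace() opens without O_CREAT), and
 * reduces error handling to perror() for brevity.
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		char tag[] = "example utrace record";
 *
 *		// Trace system calls and name lookups for this process and
 *		// its future children; each event becomes a ktr_request
 *		// queued and written out by the code above.
 *		if (ktrace("ktrace.out", KTROP_SET | KTRFLAG_DESCEND,
 *		    KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI |
 *		    KTRFAC_INHERIT, getpid()) == -1)
 *			perror("ktrace");
 *
 *		// Emit a user-defined record, handled by sys_utrace().
 *		if (utrace(tag, sizeof(tag)) == -1)
 *			perror("utrace");
 *
 *		// Stop tracing this process.
 *		if (ktrace("ktrace.out", KTROP_CLEAR, KTRFAC_SYSCALL |
 *		    KTRFAC_SYSRET | KTRFAC_NAMEI | KTRFAC_INHERIT,
 *		    getpid()) == -1)
 *			perror("ktrace");
 *		return (0);
 *	}
 *
 * The resulting log would normally be decoded with kdump(1).
 */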