/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

#include <security/mac/mac_framework.h>

/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, the
 * next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (i.e., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */
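/*
 * Illustrative sketch only (error handling elided; file name and facility
 * selection are arbitrary): a userspace consumer typically drives this
 * facility roughly as ktrace(1) does, creating the trace file first because
 * KTROP_SET opens an existing file without O_CREAT, then decoding the
 * result with kdump(1):
 *
 *	int fd = open("ktrace.out", O_WRONLY | O_CREAT | O_TRUNC, 0600);
 *	close(fd);
 *	ktrace("ktrace.out", KTROP_SET,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI, getpid());
 *	... run the code of interest ...
 *	ktrace("ktrace.out", KTROP_CLEAR,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI, getpid());
 */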

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

FEATURE(ktrace, "Kernel support for system-call tracing");

#ifndef KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct ktr_header ktr_header;
	void *ktr_buffer;
	union {
		struct ktr_proc_ctor ktr_proc_ctor;
		struct ktr_cap_fail ktr_cap_fail;
		struct ktr_syscall ktr_syscall;
		struct ktr_sysret ktr_sysret;
		struct ktr_genio ktr_genio;
		struct ktr_psig ktr_psig;
		struct ktr_csw ktr_csw;
		struct ktr_fault ktr_fault;
		struct ktr_faultend ktr_faultend;
		struct ktr_struct_array ktr_struct_array;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};

static const int data_lengths[] = {
	[KTR_SYSCALL] = offsetof(struct ktr_syscall, ktr_args),
	[KTR_SYSRET] = sizeof(struct ktr_sysret),
	[KTR_NAMEI] = 0,
	[KTR_GENIO] = sizeof(struct ktr_genio),
	[KTR_PSIG] = sizeof(struct ktr_psig),
	[KTR_CSW] = sizeof(struct ktr_csw),
	[KTR_USER] = 0,
	[KTR_STRUCT] = 0,
	[KTR_SYSCTL] = 0,
	[KTR_PROCCTOR] = sizeof(struct ktr_proc_ctor),
	[KTR_PROCDTOR] = 0,
	[KTR_CAPFAIL] = sizeof(struct ktr_cap_fail),
	[KTR_FAULT] = sizeof(struct ktr_fault),
	[KTR_FAULTEND] = sizeof(struct ktr_faultend),
	[KTR_STRUCT_ARRAY] = sizeof(struct ktr_struct_array),
	[KTR_ARGS] = 0,
	[KTR_ENVS] = 0,
};

static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

u_int ktr_geniosize = PAGE_SIZE;
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RWTUN, &ktr_geniosize,
    0, "Maximum size of genio event payload");

/*
 * Allow suppressing the signal that would otherwise be sent to the traced
 * process, in whose context the ktr record is written, when the trace file
 * exceeds the size limit.  The limit is taken from the process that set up
 * ktrace, so killing the traced process for exceeding it is not completely
 * fair.
 */
int ktr_filesize_limit_signal = 0;
SYSCTL_INT(_kern_ktrace, OID_AUTO, filesize_limit_signal, CTLFLAG_RWTUN,
    &ktr_filesize_limit_signal, 0,
    "Send SIGXFSZ to the traced process when the log size limit is exceeded");

static int print_message = 1;
static struct mtx ktrace_mtx;
static struct sx ktrace_sx;

struct ktr_io_params {
	struct vnode	*vp;
	struct ucred	*cr;
	off_t		lim;
	u_int		refs;
};

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int oldsize, u_int newsize);
static struct ktr_request *ktr_getrequest_entered(struct thread *td, int type);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static struct ktr_io_params *ktr_freeproc(struct proc *p);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_freerequest_locked(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *, struct proc *);
static int ktrsetchildren(struct thread *, struct proc *, int, int,
    struct ktr_io_params *);
static int ktrops(struct thread *, struct proc *, int, int,
    struct ktr_io_params *);
static void ktrprocctor_entered(struct thread *, struct proc *);

/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}

static void
ast_ktrace(struct thread *td, int tda __unused)
{
	KTRUSERRET(td);
}

static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK |
		    M_ZERO);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
	ast_register(TDA_KTRACE, ASTR_ASTF_REQUIRED, 0, ast_ktrace);
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		oldsize = ktr_requestpool;
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	ktrace_enter(td);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(oldsize, wantsize);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ktr_requestpool, 0,
    sysctl_kern_ktrace_request_pool, "IU",
    "Pool buffer size for ktrace(1)");

static u_int
ktrace_resize_pool(u_int oldsize, u_int newsize)
{
	STAILQ_HEAD(, ktr_request) ktr_new;
	struct ktr_request *req;
	int bound;

	print_message = 1;
	bound = newsize - oldsize;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0) {
		mtx_lock(&ktrace_mtx);
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				break;
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			free(req, M_KTRACE);
		}
	} else {
		/* Grow pool up to newsize. */
		STAILQ_INIT(&ktr_new);
		while (bound-- > 0) {
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK | M_ZERO);
			STAILQ_INSERT_HEAD(&ktr_new, req, ktr_list);
		}
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&ktr_free, &ktr_new);
		ktr_requestpool += (newsize - oldsize);
	}
	mtx_unlock(&ktrace_mtx);
	return (ktr_requestpool);
}
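/*
 * Illustrative note: the pool is sized at boot from the
 * kern.ktrace.request_pool tunable, which is consumed by ktrace_init(),
 * and can be resized at run time through the sysctl of the same name,
 * e.g. "sysctl kern.ktrace.request_pool=200", which reaches
 * ktrace_resize_pool() above via sysctl_kern_ktrace_request_pool().
 */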
/* ktr_getrequest() assumes that ktr_comm[] is the same size as td_name[]. */
CTASSERT(sizeof(((struct ktr_header *)NULL)->ktr_comm) ==
    (sizeof((struct thread *)NULL)->td_name));

static struct ktr_request *
ktr_getrequest_entered(struct thread *td, int type)
{
	struct ktr_request *req;
	struct proc *p = td->td_proc;
	int pm;

	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		nanotime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_type |= KTR_VERSIONED;
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		req->ktr_header.ktr_cpu = PCPU_GET(cpuid);
		req->ktr_header.ktr_version = KTR_VERSION1;
		bcopy(td->td_name, req->ktr_header.ktr_comm,
		    sizeof(req->ktr_header.ktr_comm));
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
	}
	return (req);
}

static struct ktr_request *
ktr_getrequest(int type)
{
	struct thread *td = curthread;
	struct ktr_request *req;

	ktrace_enter(td);
	req = ktr_getrequest_entered(td, type);
	if (req == NULL)
		ktrace_exit(td);

	return (req);
}

/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under
 * these circumstances, queue a request to the thread to be written
 * asynchronously later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
	ast_sched(td, TDA_KTRACE);
}

/*
 * Drain any pending ktrace records from the per-process queue to disk.
 * This is used both internally before committing other records, and also on
 * system call return.  We drain all the ones we can find at the time when
 * drain is requested, but don't keep draining after that as those events
 * may be approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}
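/*
 * The ktr* entry points below share a common shape; KTR_FOO/ktr_foo are
 * illustrative placeholders, not real record types:
 *
 *	req = ktr_getrequest(KTR_FOO);
 *	if (req == NULL)
 *		return;
 *	req->ktr_data.ktr_foo = ...;		(fixed-size payload)
 *	req->ktr_buffer = buf;			(optional variable payload)
 *	req->ktr_header.ktr_len = buflen;
 *	ktr_submitrequest(curthread, req);	(contexts that may sleep)
 * or
 *	ktr_enqueuerequest(td, req);		(contexts that may not sleep)
 *	ktrace_exit(td);
 *
 * ktr_submitrequest() clears TDP_INKTRACE itself, while callers that
 * enqueue must call ktrace_exit() explicitly.
 */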
/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

static void
ktr_freerequest(struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	ktr_freerequest_locked(req);
	mtx_unlock(&ktrace_mtx);
}

static void
ktr_freerequest_locked(struct ktr_request *req)
{

	mtx_assert(&ktrace_mtx, MA_OWNED);
	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
}

static void
ktr_io_params_ref(struct ktr_io_params *kiop)
{
	mtx_assert(&ktrace_mtx, MA_OWNED);
	kiop->refs++;
}

static struct ktr_io_params *
ktr_io_params_rele(struct ktr_io_params *kiop)
{
	mtx_assert(&ktrace_mtx, MA_OWNED);
	if (kiop == NULL)
		return (NULL);
	KASSERT(kiop->refs > 0, ("kiop ref == 0 %p", kiop));
	return (--(kiop->refs) == 0 ? kiop : NULL);
}

void
ktr_io_params_free(struct ktr_io_params *kiop)
{
	if (kiop == NULL)
		return;

	MPASS(kiop->refs == 0);
	vn_close(kiop->vp, FWRITE, kiop->cr, curthread);
	crfree(kiop->cr);
	free(kiop, M_KTRACE);
}

static struct ktr_io_params *
ktr_io_params_alloc(struct thread *td, struct vnode *vp)
{
	struct ktr_io_params *res;

	res = malloc(sizeof(struct ktr_io_params), M_KTRACE, M_WAITOK);
	res->vp = vp;
	res->cr = crhold(td->td_ucred);
	res->lim = lim_cur(td, RLIMIT_FSIZE);
	res->refs = 1;
	return (res);
}

/*
 * Disable tracing for a process and release all associated resources.
 * The caller is responsible for releasing a reference on the returned
 * vnode and credentials.
 */
static struct ktr_io_params *
ktr_freeproc(struct proc *p)
{
	struct ktr_io_params *kiop;
	struct ktr_request *req;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&ktrace_mtx, MA_OWNED);
	kiop = ktr_io_params_rele(p->p_ktrioparms);
	p->p_ktrioparms = NULL;
	p->p_traceflag = 0;
	while ((req = STAILQ_FIRST(&p->p_ktr)) != NULL) {
		STAILQ_REMOVE_HEAD(&p->p_ktr, ktr_list);
		ktr_freerequest_locked(req);
	}
	return (kiop);
}

struct vnode *
ktr_get_tracevp(struct proc *p, bool ref)
{
	struct vnode *vp;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (p->p_ktrioparms != NULL) {
		vp = p->p_ktrioparms->vp;
		if (ref)
			vrefact(vp);
	} else {
		vp = NULL;
	}
	return (vp);
}

void
ktrsyscall(int code, int narg, syscallarg_t args[])
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

void
ktrdata(int type, const void *data, size_t len)
{
	struct ktr_request *req;
	void *buf;

	if ((req = ktr_getrequest(type)) == NULL)
		return;
	buf = malloc(len, M_KTRACE, M_WAITOK);
	bcopy(data, buf, len);
	req->ktr_header.ktr_len = len;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}

void
ktrsysret(int code, int error, register_t retval)
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = ((error == 0) ? retval: 0);	/* what about val2 ? */
	ktr_submitrequest(curthread, req);
}

/*
 * When a setuid process execs, disable tracing.
 *
 * XXX: We toss any pending asynchronous records.
 */
struct ktr_io_params *
ktrprocexec(struct proc *p)
{
	struct ktr_io_params *kiop;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	kiop = p->p_ktrioparms;
	if (kiop == NULL || priv_check_cred(kiop->cr, PRIV_DEBUG_DIFFCRED) == 0)
		return (NULL);

	mtx_lock(&ktrace_mtx);
	kiop = ktr_freeproc(p);
	mtx_unlock(&ktrace_mtx);
	return (kiop);
}

/*
 * When a process exits, drain per-process asynchronous trace records
 * and disable tracing.
 */
void
ktrprocexit(struct thread *td)
{
	struct ktr_request *req;
	struct proc *p;
	struct ktr_io_params *kiop;

	p = td->td_proc;
	if (p->p_traceflag == 0)
		return;

	ktrace_enter(td);
	req = ktr_getrequest_entered(td, KTR_PROCDTOR);
	if (req != NULL)
		ktr_enqueuerequest(td, req);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	kiop = ktr_freeproc(p);
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	ktr_io_params_free(kiop);
	ktrace_exit(td);
}

static void
ktrprocctor_entered(struct thread *td, struct proc *p)
{
	struct ktr_proc_ctor *ktp;
	struct ktr_request *req;
	struct thread *td2;

	ktrace_assert(td);
	td2 = FIRST_THREAD_IN_PROC(p);
	req = ktr_getrequest_entered(td2, KTR_PROCCTOR);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_proc_ctor;
	ktp->sv_flags = p->p_sysent->sv_flags;
	ktr_enqueuerequest(td2, req);
}

void
ktrprocctor(struct proc *p)
{
	struct thread *td = curthread;

	if ((p->p_traceflag & KTRFAC_MASK) == 0)
		return;

	ktrace_enter(td);
	ktrprocctor_entered(td, p);
	ktrace_exit(td);
}

/*
 * When a process forks, enable tracing in the new process if needed.
 */
void
ktrprocfork(struct proc *p1, struct proc *p2)
{

	MPASS(p2->p_ktrioparms == NULL);
	MPASS(p2->p_traceflag == 0);

	if (p1->p_traceflag == 0)
		return;

	PROC_LOCK(p1);
	mtx_lock(&ktrace_mtx);
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_ktrioparms = p1->p_ktrioparms) != NULL)
			p1->p_ktrioparms->refs++;
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p1);

	ktrprocctor(p2);
}

/*
 * When a thread returns, drain any asynchronous records generated by the
 * system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

void
ktrnamei(const char *path)
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

void
ktrsysctl(int *name, u_int namelen)
{
	struct ktr_request *req;
	u_int mib[CTL_MAXNAME + 2];
	char *mibname;
	size_t mibnamelen;
	int error;

	/* Lookup name of mib. */
	KASSERT(namelen <= CTL_MAXNAME, ("sysctl MIB too long"));
	mib[0] = 0;
	mib[1] = 1;
	bcopy(name, mib + 2, namelen * sizeof(*name));
	mibnamelen = 128;
	mibname = malloc(mibnamelen, M_KTRACE, M_WAITOK);
	error = kernel_sysctl(curthread, mib, namelen + 2, mibname, &mibnamelen,
	    NULL, 0, &mibnamelen, 0);
	if (error) {
		free(mibname, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_SYSCTL);
	if (req == NULL) {
		free(mibname, M_KTRACE);
		return;
	}
	req->ktr_header.ktr_len = mibnamelen;
	req->ktr_buffer = mibname;
	ktr_submitrequest(curthread, req);
}

void
ktrgenio(int fd, enum uio_rw rw, struct uio *uio, int error)
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error != 0 && (rw == UIO_READ || error == EFAULT)) {
		freeuio(uio);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = MIN(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	freeuio(uio);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}

void
ktrpsig(int sig, sig_t action, sigset_t *mask, int code)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_psig *kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrcsw(int out, int user, const char *wmesg)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_csw *kc;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	if (wmesg != NULL)
		strlcpy(kc->wmesg, wmesg, sizeof(kc->wmesg));
	else
		bzero(kc->wmesg, sizeof(kc->wmesg));
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrstruct(const char *name, const void *data, size_t datalen)
{
	struct ktr_request *req;
	char *buf;
	size_t buflen, namelen;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	if (data == NULL)
		datalen = 0;
	namelen = strlen(name) + 1;
	buflen = namelen + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	strcpy(buf, name);
	bcopy(data, buf + namelen, datalen);
	if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}

void
ktrstruct_error(const char *name, const void *data, size_t datalen, int error)
{

	if (error == 0)
		ktrstruct(name, data, datalen);
}

void
ktrstructarray(const char *name, enum uio_seg seg, const void *data,
    int num_items, size_t struct_size)
{
	struct ktr_request *req;
	struct ktr_struct_array *ksa;
	char *buf;
	size_t buflen, datalen, namelen;
	int max_items;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;
	if (num_items < 0)
		return;

	/* Trim array length to genio size. */
	max_items = ktr_geniosize / struct_size;
	if (num_items > max_items) {
		if (max_items == 0)
			num_items = 1;
		else
			num_items = max_items;
	}
	datalen = num_items * struct_size;

	if (data == NULL)
		datalen = 0;

	namelen = strlen(name) + 1;
	buflen = namelen + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	strcpy(buf, name);
	if (seg == UIO_SYSSPACE)
		bcopy(data, buf + namelen, datalen);
	else {
		if (copyin(data, buf + namelen, datalen) != 0) {
			free(buf, M_KTRACE);
			return;
		}
	}
	if ((req = ktr_getrequest(KTR_STRUCT_ARRAY)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ksa = &req->ktr_data.ktr_struct_array;
	ksa->struct_size = struct_size;
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}

void
ktrcapfail(enum ktr_cap_violation type, const void *data)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_cap_fail *kcf;
	union ktr_cap_data *kcd;

	if (__predict_false(td->td_pflags & TDP_INKTRACE))
		return;
	if (type != CAPFAIL_SYSCALL &&
	    (td->td_sa.callp->sy_flags & SYF_CAPENABLED) == 0)
		return;

	req = ktr_getrequest(KTR_CAPFAIL);
	if (req == NULL)
		return;
	kcf = &req->ktr_data.ktr_cap_fail;
	kcf->cap_type = type;
	kcf->cap_code = td->td_sa.code;
	kcf->cap_svflags = td->td_proc->p_sysent->sv_flags;
	if (data != NULL) {
		kcd = &kcf->cap_data;
		switch (type) {
		case CAPFAIL_NOTCAPABLE:
		case CAPFAIL_INCREASE:
			kcd->cap_needed = *(const cap_rights_t *)data;
			kcd->cap_held = *((const cap_rights_t *)data + 1);
			break;
		case CAPFAIL_SYSCALL:
		case CAPFAIL_SIGNAL:
		case CAPFAIL_PROTO:
			kcd->cap_int = *(const int *)data;
			break;
		case CAPFAIL_SOCKADDR:
			kcd->cap_sockaddr = *(const struct sockaddr *)data;
			break;
		case CAPFAIL_NAMEI:
			strlcpy(kcd->cap_path, data, MAXPATHLEN);
			break;
		case CAPFAIL_CPUSET:
		default:
			break;
		}
	}
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrfault(vm_offset_t vaddr, int type)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_fault *kf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_FAULT);
	if (req == NULL)
		return;
	kf = &req->ktr_data.ktr_fault;
	kf->vaddr = vaddr;
	kf->type = type;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrfaultend(int result)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_faultend *kf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_FAULTEND);
	if (req == NULL)
		return;
	kf = &req->ktr_data.ktr_faultend;
	kf->result = result;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}
#endif /* KTRACE */

/* Interface and common routines */

#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
sys_ktrace(struct thread *td, struct ktrace_args *uap)
{
#ifdef KTRACE
	struct vnode *vp = NULL;
	struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int flags, error = 0;
	struct nameidata nd;
	struct ktr_io_params *kiop, *old_kiop;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	kiop = NULL;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		error = vn_open(&nd, &flags, 0, NULL);
		if (error)
			return (error);
		NDFREE_PNBUF(&nd);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp);
		if (vp->v_type != VREG) {
			(void)vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			return (EACCES);
		}
		kiop = ktr_io_params_alloc(td, vp);
	}

	/*
	 * Clear all uses of the tracefile.
	 */
	ktrace_enter(td);
	if (ops == KTROP_CLEARFILE) {
restart:
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			old_kiop = NULL;
			PROC_LOCK(p);
			if (p->p_ktrioparms != NULL &&
			    p->p_ktrioparms->vp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					old_kiop = ktr_freeproc(p);
					mtx_unlock(&ktrace_mtx);
				} else
					error = EPERM;
			}
			PROC_UNLOCK(p);
			if (old_kiop != NULL) {
				sx_sunlock(&allproc_lock);
				ktr_io_params_free(old_kiop);
				goto restart;
			}
		}
		sx_sunlock(&allproc_lock);
		goto done;
	}
	/*
	 * do it
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}

		/*
		 * ktrops() may call vrele(). Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		if (LIST_EMPTY(&pg->pg_members)) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, kiop);
			else
				ret |= ktrops(td, p, ops, facs, kiop);
		}
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, kiop);
		else
			ret |= ktrops(td, p, ops, facs, kiop);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (kiop != NULL) {
		mtx_lock(&ktrace_mtx);
		kiop = ktr_io_params_rele(kiop);
		mtx_unlock(&ktrace_mtx);
		ktr_io_params_free(kiop);
	}
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/* ARGSUSED */
int
sys_utrace(struct thread *td, struct utrace_args *uap)
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

#ifdef KTRACE
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs,
    struct ktr_io_params *new_kiop)
{
	struct ktr_io_params *old_kiop;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	if ((ops == KTROP_SET && p->p_state == PRS_NEW) ||
	    p_cansee(td, p) != 0) {
		/*
		 * Disallow setting trace points if the process is being born.
		 * This avoids races with trace point inheritance in
		 * ktrprocfork().
		 */
		PROC_UNLOCK(p);
		return (0);
	}
	if ((p->p_flag & P_WEXIT) != 0) {
		/*
		 * There's nothing to do if the process is exiting, but avoid
		 * signaling an error.
		 */
		PROC_UNLOCK(p);
		return (1);
	}
	old_kiop = NULL;
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_ktrioparms != NULL &&
		    p->p_ktrioparms->vp != new_kiop->vp) {
			/* if trace file already in use, relinquish below */
			old_kiop = ktr_io_params_rele(p->p_ktrioparms);
			p->p_ktrioparms = NULL;
		}
		if (p->p_ktrioparms == NULL) {
			p->p_ktrioparms = new_kiop;
			ktr_io_params_ref(new_kiop);
		}
		p->p_traceflag |= facs;
		if (priv_check(td, PRIV_KTRACE) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0)
			/* no more tracing */
			old_kiop = ktr_freeproc(p);
	}
	mtx_unlock(&ktrace_mtx);
	if ((p->p_traceflag & KTRFAC_MASK) != 0)
		ktrprocctor_entered(td, p);
	PROC_UNLOCK(p);
	ktr_io_params_free(old_kiop);

	return (1);
}

static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
    struct ktr_io_params *new_kiop)
{
	struct proc *p;
	int ret = 0;

	p = top;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, new_kiop);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
	/*NOTREACHED*/
}
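/*
 * Each record written below consists of a ktr_header, optionally followed
 * by the fixed-size payload from ktr_data (length taken from the
 * data_lengths[] table) and by the variable-length ktr_buffer; ktr_len in
 * the header on disk covers both payloads.  This is the stream layout that
 * kdump(1) decodes.
 */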
static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_io_params *kiop, *kiop1;
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	off_t lim;
	int datalen, buflen;
	int error;

	p = td->td_proc;

	/*
	 * We reference the kiop for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 */
	mtx_lock(&ktrace_mtx);
	kiop = p->p_ktrioparms;

	/*
	 * If kiop is NULL, it has been cleared out from under this
	 * request, so just drop it.
	 */
	if (kiop == NULL) {
		mtx_unlock(&ktrace_mtx);
		return;
	}

	ktr_io_params_ref(kiop);
	vp = kiop->vp;
	cred = kiop->cr;
	lim = kiop->lim;

	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));
	mtx_unlock(&ktrace_mtx);

	kth = &req->ktr_header;
	KASSERT(((u_short)kth->ktr_type & ~KTR_TYPE) < nitems(data_lengths),
	    ("data_lengths array overflow"));
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_TYPE];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	td->td_ktr_io_lim = lim;
#ifdef MAC
	error = mac_vnode_check_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp);
	vn_finished_write(mp);
	if (error == 0) {
		mtx_lock(&ktrace_mtx);
		kiop = ktr_io_params_rele(kiop);
		mtx_unlock(&ktrace_mtx);
		ktr_io_params_free(kiop);
		return;
	}

	/*
	 * If error encountered, give up tracing on this vnode on this
	 * process.  Other processes might still be suitable for
	 * writes to this vnode.
	 */
	log(LOG_NOTICE,
	    "ktrace write failed, errno %d, tracing stopped for pid %d\n",
	    error, p->p_pid);

	kiop1 = NULL;
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	if (p->p_ktrioparms != NULL && p->p_ktrioparms->vp == vp)
		kiop1 = ktr_freeproc(p);
	kiop = ktr_io_params_rele(kiop);
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	ktr_io_params_free(kiop1);
	ktr_io_params_free(kiop);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(struct thread *td, struct proc *targetp)
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    priv_check(td, PRIV_KTRACE))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */