/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

#include <security/mac/mac_framework.h>

/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, the
 * next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (i.e., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */
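/*
 * Illustrative userland sketch (not kernel code, rough example only): a
 * process typically enables tracing on itself with ktrace(2) and can emit a
 * custom record with utrace(2); kdump(1) is then used to decode the file.
 * The trace-point names come from <sys/ktrace.h>; the file name is arbitrary.
 *
 *	ktrace("ktrace.out", KTROP_SET,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI, getpid());
 *	utrace("my marker", sizeof("my marker"));
 *	ktrace("ktrace.out", KTROP_CLEAR,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI, getpid());
 */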
static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

FEATURE(ktrace, "Kernel support for system-call tracing");

#ifndef KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct ktr_header ktr_header;
	void	*ktr_buffer;
	union {
		struct	ktr_proc_ctor ktr_proc_ctor;
		struct	ktr_cap_fail ktr_cap_fail;
		struct	ktr_syscall ktr_syscall;
		struct	ktr_sysret ktr_sysret;
		struct	ktr_genio ktr_genio;
		struct	ktr_psig ktr_psig;
		struct	ktr_csw ktr_csw;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};

static int data_lengths[] = {
	0,					/* none */
	offsetof(struct ktr_syscall, ktr_args),	/* KTR_SYSCALL */
	sizeof(struct ktr_sysret),		/* KTR_SYSRET */
	0,					/* KTR_NAMEI */
	sizeof(struct ktr_genio),		/* KTR_GENIO */
	sizeof(struct ktr_psig),		/* KTR_PSIG */
	sizeof(struct ktr_csw),			/* KTR_CSW */
	0,					/* KTR_USER */
	0,					/* KTR_STRUCT */
	0,					/* KTR_SYSCTL */
	sizeof(struct ktr_proc_ctor),		/* KTR_PROCCTOR */
	0,					/* KTR_PROCDTOR */
	sizeof(struct ktr_cap_fail),		/* KTR_CAPFAIL */
};

static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

static u_int ktr_geniosize = PAGE_SIZE;
TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize);
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize,
    0, "Maximum size of genio event payload");

static int print_message = 1;
static struct mtx ktrace_mtx;
static struct sx ktrace_sx;

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int oldsize, u_int newsize);
static struct ktr_request *ktr_getrequest_entered(struct thread *td, int type);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static void ktr_freeproc(struct proc *p, struct ucred **uc,
    struct vnode **vp);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_freerequest_locked(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *,struct proc *);
static int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *);
static int ktrops(struct thread *,struct proc *,int,int,struct vnode *);
static void ktrprocctor_entered(struct thread *, struct proc *);

/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}

static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		oldsize = ktr_requestpool;
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	ktrace_enter(td);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(oldsize, wantsize);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
    &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU",
    "Pool buffer size for ktrace(1)");

static u_int
ktrace_resize_pool(u_int oldsize, u_int newsize)
{
	STAILQ_HEAD(, ktr_request) ktr_new;
	struct ktr_request *req;
	int bound;

	print_message = 1;
	bound = newsize - oldsize;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0) {
		mtx_lock(&ktrace_mtx);
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				break;
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			free(req, M_KTRACE);
		}
	} else {
		/* Grow pool up to newsize. */
		STAILQ_INIT(&ktr_new);
		while (bound-- > 0) {
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK);
			STAILQ_INSERT_HEAD(&ktr_new, req, ktr_list);
		}
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&ktr_free, &ktr_new);
		ktr_requestpool += (newsize - oldsize);
	}
	mtx_unlock(&ktrace_mtx);
	return (ktr_requestpool);
}

/*
 * ktr_getrequest() assumes that ktr_comm[] is the same size as td_name[].
 */
CTASSERT(sizeof(((struct ktr_header *)NULL)->ktr_comm) ==
    (sizeof((struct thread *)NULL)->td_name));

static struct ktr_request *
ktr_getrequest_entered(struct thread *td, int type)
{
	struct ktr_request *req;
	struct proc *p = td->td_proc;
	int pm;

	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		microtime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		bcopy(td->td_name, req->ktr_header.ktr_comm,
		    sizeof(req->ktr_header.ktr_comm));
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
	}
	return (req);
}

static struct ktr_request *
ktr_getrequest(int type)
{
	struct thread *td = curthread;
	struct ktr_request *req;

	ktrace_enter(td);
	req = ktr_getrequest_entered(td, type);
	if (req == NULL)
		ktrace_exit(td);

	return (req);
}

/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under these
 * circumstances, queue a request to the thread to be written asynchronously
 * later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
}

/*
 * Drain any pending ktrace records from the per-thread queue to disk.  This
 * is used both internally before committing other records, and also on
 * system call return.  We drain all the ones we can find at the time when
 * drain is requested, but don't keep draining after that as those events
 * may be approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}

/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

static void
ktr_freerequest(struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	ktr_freerequest_locked(req);
	mtx_unlock(&ktrace_mtx);
}

static void
ktr_freerequest_locked(struct ktr_request *req)
{

	mtx_assert(&ktrace_mtx, MA_OWNED);
	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
}

/*
 * Disable tracing for a process and release all associated resources.
 * The caller is responsible for releasing a reference on the returned
 * vnode and credentials.
 */
static void
ktr_freeproc(struct proc *p, struct ucred **uc, struct vnode **vp)
{
	struct ktr_request *req;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&ktrace_mtx, MA_OWNED);
	*uc = p->p_tracecred;
	p->p_tracecred = NULL;
	if (vp != NULL)
		*vp = p->p_tracevp;
	p->p_tracevp = NULL;
	p->p_traceflag = 0;
	while ((req = STAILQ_FIRST(&p->p_ktr)) != NULL) {
		STAILQ_REMOVE_HEAD(&p->p_ktr, ktr_list);
		ktr_freerequest_locked(req);
	}
}

void
ktrsyscall(code, narg, args)
	int code, narg;
	register_t args[];
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

void
ktrsysret(code, error, retval)
	int code, error;
	register_t retval;
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = ((error == 0) ? retval: 0);	/* what about val2 ? */
	ktr_submitrequest(curthread, req);
}

/*
 * When a setuid process execs, disable tracing.
 *
 * XXX: We toss any pending asynchronous records.
 */
void
ktrprocexec(struct proc *p, struct ucred **uc, struct vnode **vp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_lock(&ktrace_mtx);
	ktr_freeproc(p, uc, vp);
	mtx_unlock(&ktrace_mtx);
}

/*
 * When a process exits, drain per-process asynchronous trace records
 * and disable tracing.
 */
void
ktrprocexit(struct thread *td)
{
	struct ktr_request *req;
	struct proc *p;
	struct ucred *cred;
	struct vnode *vp;
	int vfslocked;

	p = td->td_proc;
	if (p->p_traceflag == 0)
		return;

	ktrace_enter(td);
	req = ktr_getrequest_entered(td, KTR_PROCDTOR);
	if (req != NULL)
		ktr_enqueuerequest(td, req);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	ktr_freeproc(p, &cred, &vp);
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (vp != NULL) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vrele(vp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (cred != NULL)
		crfree(cred);
	ktrace_exit(td);
}

static void
ktrprocctor_entered(struct thread *td, struct proc *p)
{
	struct ktr_proc_ctor *ktp;
	struct ktr_request *req;
	struct thread *td2;

	ktrace_assert(td);
	td2 = FIRST_THREAD_IN_PROC(p);
	req = ktr_getrequest_entered(td2, KTR_PROCCTOR);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_proc_ctor;
	ktp->sv_flags = p->p_sysent->sv_flags;
	ktr_enqueuerequest(td2, req);
}

void
ktrprocctor(struct proc *p)
{
	struct thread *td = curthread;

	if ((p->p_traceflag & KTRFAC_MASK) == 0)
		return;

	ktrace_enter(td);
	ktrprocctor_entered(td, p);
	ktrace_exit(td);
}

/*
 * When a process forks, enable tracing in the new process if needed.
 */
void
ktrprocfork(struct proc *p1, struct proc *p2)
{

	PROC_LOCK(p1);
	mtx_lock(&ktrace_mtx);
	KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode"));
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
			VREF(p2->p_tracevp);
			KASSERT(p1->p_tracecred != NULL,
			    ("ktrace vnode with no cred"));
			p2->p_tracecred = crhold(p1->p_tracecred);
		}
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p1);

	ktrprocctor(p2);
}

/*
 * When a thread returns, drain any asynchronous records generated by the
 * system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

void
ktrnamei(path)
	char *path;
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

void
ktrsysctl(name, namelen)
	int *name;
	u_int namelen;
{
	struct ktr_request *req;
	u_int mib[CTL_MAXNAME + 2];
	char *mibname;
	size_t mibnamelen;
	int error;

	/* Lookup name of mib. */
	KASSERT(namelen <= CTL_MAXNAME, ("sysctl MIB too long"));
	mib[0] = 0;
	mib[1] = 1;
	bcopy(name, mib + 2, namelen * sizeof(*name));
	mibnamelen = 128;
	mibname = malloc(mibnamelen, M_KTRACE, M_WAITOK);
	error = kernel_sysctl(curthread, mib, namelen + 2, mibname, &mibnamelen,
	    NULL, 0, &mibnamelen, 0);
	if (error) {
		free(mibname, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_SYSCTL);
	if (req == NULL) {
		free(mibname, M_KTRACE);
		return;
	}
	req->ktr_header.ktr_len = mibnamelen;
	req->ktr_buffer = mibname;
	ktr_submitrequest(curthread, req);
}

void
ktrgenio(fd, rw, uio, error)
	int fd;
	enum uio_rw rw;
	struct uio *uio;
	int error;
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error) {
		free(uio, M_IOV);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = imin(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	free(uio, M_IOV);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}

void
ktrpsig(sig, action, mask, code)
	int sig;
	sig_t action;
	sigset_t *mask;
	int code;
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_psig *kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrcsw(out, user)
	int out, user;
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_csw *kc;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrstruct(name, data, datalen)
	const char *name;
	void *data;
	size_t datalen;
{
	struct ktr_request *req;
	char *buf = NULL;
	size_t buflen;

	if (!data)
		datalen = 0;
	buflen = strlen(name) + 1 + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	strcpy(buf, name);
	bcopy(data, buf + strlen(name) + 1, datalen);
	if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}

void
ktrcapfail(type, needed, held)
	enum ktr_cap_fail_type type;
	cap_rights_t needed;
	cap_rights_t held;
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_cap_fail *kcf;

	req = ktr_getrequest(KTR_CAPFAIL);
	if (req == NULL)
		return;
	kcf = &req->ktr_data.ktr_cap_fail;
	kcf->cap_type = type;
	kcf->cap_needed = needed;
	kcf->cap_held = held;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}
#endif /* KTRACE */

/* Interface and common routines */

#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
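/*
 * How sys_ktrace() below interprets the ktrace(2) arguments: the low bits of
 * "ops" select one of KTROP_SET, KTROP_CLEAR or KTROP_CLEARFILE and may be
 * OR'd with KTRFLAG_DESCEND to apply the change to a whole process subtree;
 * "facs" is a mask of KTRFAC_* trace points; a negative "pid" names a process
 * group rather than a single process.  For example (illustrative values
 * only):
 *
 *	ops  = KTROP_SET | KTRFLAG_DESCEND
 *	facs = KTRFAC_SYSCALL | KTRFAC_SYSRET
 *	pid  = -pgid		(trace every member of the process group)
 */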
/* ARGSUSED */
int
sys_ktrace(td, uap)
	struct thread *td;
	register struct ktrace_args *uap;
{
#ifdef KTRACE
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int nfound, ret = 0;
	int flags, error = 0, vfslocked;
	struct nameidata nd;
	struct ucred *cred;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	ktrace_enter(td);
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_USERSPACE,
		    uap->fname, td);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		error = vn_open(&nd, &flags, 0, NULL);
		if (error) {
			ktrace_exit(td);
			return (error);
		}
		vfslocked = NDHASGIANT(&nd);
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			VFS_UNLOCK_GIANT(vfslocked);
			ktrace_exit(td);
			return (EACCES);
		}
		VFS_UNLOCK_GIANT(vfslocked);
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
		int vrele_count;

		vrele_count = 0;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_tracevp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					ktr_freeproc(p, &cred, NULL);
					mtx_unlock(&ktrace_mtx);
					vrele_count++;
					crfree(cred);
				} else
					error = EPERM;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (vrele_count > 0) {
			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
			while (vrele_count-- > 0)
				vrele(vp);
			VFS_UNLOCK_GIANT(vfslocked);
		}
		goto done;
	}
	/*
	 * do it
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * ktrops() may call vrele(). Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		nfound = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW ||
			    p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			nfound++;
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, vp);
			else
				ret |= ktrops(td, p, ops, facs, vp);
		}
		if (nfound == 0) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL)
			error = ESRCH;
		else
			error = p_cansee(td, p);
		if (error) {
			if (p != NULL)
				PROC_UNLOCK(p);
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, vp);
		else
			ret |= ktrops(td, p, ops, facs, vp);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_close(vp, FWRITE, td->td_ucred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/* ARGSUSED */
int
sys_utrace(td, uap)
	struct thread *td;
	register struct utrace_args *uap;
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

#ifdef KTRACE
static int
ktrops(td, p, ops, facs, vp)
	struct thread *td;
	struct proc *p;
	int ops, facs;
	struct vnode *vp;
{
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	if (p->p_flag & P_WEXIT) {
		/* If the process is exiting, just ignore it. */
		PROC_UNLOCK(p);
		return (1);
	}
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_tracevp != vp) {
			/*
			 * if trace file already in use, relinquish below
			 */
			tracevp = p->p_tracevp;
			VREF(vp);
			p->p_tracevp = vp;
		}
		if (p->p_tracecred != td->td_ucred) {
			tracecred = p->p_tracecred;
			p->p_tracecred = crhold(td->td_ucred);
		}
		p->p_traceflag |= facs;
		if (priv_check(td, PRIV_KTRACE) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0)
			/* no more tracing */
			ktr_freeproc(p, &tracecred, &tracevp);
	}
	mtx_unlock(&ktrace_mtx);
	if ((p->p_traceflag & KTRFAC_MASK) != 0)
		ktrprocctor_entered(td, p);
	PROC_UNLOCK(p);
	if (tracevp != NULL) {
		int vfslocked;

		vfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
		vrele(tracevp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (tracecred != NULL)
		crfree(tracecred);

	return (1);
}

static int
ktrsetchildren(td, top, ops, facs, vp)
	struct thread *td;
	struct proc *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
	/*NOTREACHED*/
}
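/*
 * On-disk layout produced by ktr_writerequest() below (sketch only; kdump(1)
 * does the actual decoding): each record is a struct ktr_header whose
 * ktr_len counts everything that follows, then the fixed-size payload for
 * the record type (data_lengths[type] bytes taken from ktr_data), then any
 * variable-length buffer.  For example, a KTR_SYSCALL record looks roughly
 * like:
 *
 *	struct ktr_header | struct ktr_syscall (ktr_code, ktr_narg) |
 *	    register_t args[ktr_narg]
 */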
static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error, vfslocked;

	/*
	 * We hold the vnode and credential for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 *
	 * XXXRW: This is not ideal: we could end up performing a write after
	 * the vnode has been closed.
	 */
	mtx_lock(&ktrace_mtx);
	vp = td->td_proc->p_tracevp;
	cred = td->td_proc->p_tracecred;

	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.  Make sure the credential and vnode are
	 * in sync: we should have both or neither.
	 */
	if (vp == NULL) {
		KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
		mtx_unlock(&ktrace_mtx);
		return;
	}
	VREF(vp);
	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));
	crhold(cred);
	mtx_unlock(&ktrace_mtx);

	kth = &req->ktr_header;
	KASSERT(((u_short)kth->ktr_type & ~KTR_DROP) <
	    sizeof(data_lengths) / sizeof(data_lengths[0]),
	    ("data_lengths array overflow"));
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
	error = mac_vnode_check_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
	crfree(cred);
	if (!error) {
		vrele(vp);
		VFS_UNLOCK_GIANT(vfslocked);
		return;
	}
	VFS_UNLOCK_GIANT(vfslocked);

	/*
	 * If error encountered, give up tracing on this vnode.  We defer
	 * all the vrele()'s on the vnode until after we are finished walking
	 * the various lists to avoid needlessly holding locks.
	 * NB: at this point we still hold the vnode reference that must
	 * not go away as we need the valid vnode to compare with.  Thus let
	 * vrele_count start at 1 and the reference will be freed
	 * by the loop at the end after our last use of vp.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 1;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 * we really do this?  Other processes might have suitable
	 * credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			ktr_freeproc(p, &cred, NULL);
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	while (vrele_count-- > 0)
		vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(td, targetp)
	struct thread *td;
	struct proc *targetp;
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    priv_check(td, PRIV_KTRACE))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */