/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/sema.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

#ifndef KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct	ktr_header ktr_header;
	struct	ucred *ktr_cred;
	struct	vnode *ktr_vp;
	union {
		struct	ktr_syscall ktr_syscall;
		struct	ktr_sysret ktr_sysret;
		struct	ktr_genio ktr_genio;
		struct	ktr_psig ktr_psig;
		struct	ktr_csw ktr_csw;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};

static int data_lengths[] = {
	0,					/* none */
	offsetof(struct ktr_syscall, ktr_args),	/* KTR_SYSCALL */
	sizeof(struct ktr_sysret),		/* KTR_SYSRET */
	0,					/* KTR_NAMEI */
	sizeof(struct ktr_genio),		/* KTR_GENIO */
	sizeof(struct ktr_psig),		/* KTR_PSIG */
	sizeof(struct ktr_csw),			/* KTR_CSW */
	0					/* KTR_USER */
};

static STAILQ_HEAD(, ktr_request) ktr_todo;
static STAILQ_HEAD(, ktr_request) ktr_free;

SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

static u_int ktr_geniosize = PAGE_SIZE;
TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize);
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize,
    0, "Maximum size of genio event payload");

static int print_message = 1;
struct mtx ktrace_mtx;
static struct sema ktrace_sema;

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int newsize);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct ktr_request *req);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_loop(void *dummy);
static void ktr_writerequest(struct ktr_request *req);
static int ktrcanset(struct thread *, struct proc *);
static int ktrsetchildren(struct thread *, struct proc *, int, int,
    struct vnode *);
static int ktrops(struct thread *, struct proc *, int, int, struct vnode *);

static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sema_init(&ktrace_sema, 0, "ktrace");
	STAILQ_INIT(&ktr_todo);
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
	kthread_create(ktr_loop, NULL, NULL, RFHIGHPID, 0, "ktrace");
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

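/*
 * Sysctl handler for kern.ktrace.request_pool: a read reports the
 * current size of the request pool; a write asks ktrace_resize_pool()
 * to grow or shrink the pool to the requested size, returning ENOSPC
 * if the pool could not reach that size.
 */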
static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		mtx_lock(&ktrace_mtx);
		oldsize = ktr_requestpool;
		mtx_unlock(&ktrace_mtx);
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	td->td_pflags |= TDP_INKTRACE;
	mtx_lock(&ktrace_mtx);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(wantsize);
	mtx_unlock(&ktrace_mtx);
	td->td_pflags &= ~TDP_INKTRACE;
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (newsize != wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
    &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", "");

static u_int
ktrace_resize_pool(u_int newsize)
{
	struct ktr_request *req;

	mtx_assert(&ktrace_mtx, MA_OWNED);
	print_message = 1;
	if (newsize == ktr_requestpool)
		return (newsize);
	if (newsize < ktr_requestpool)
		/* Shrink pool down to newsize if possible. */
		while (ktr_requestpool > newsize) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				return (ktr_requestpool);
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			mtx_unlock(&ktrace_mtx);
			free(req, M_KTRACE);
			mtx_lock(&ktrace_mtx);
		}
	else
		/* Grow pool up to newsize. */
		while (ktr_requestpool < newsize) {
			mtx_unlock(&ktrace_mtx);
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK);
			mtx_lock(&ktrace_mtx);
			STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
			ktr_requestpool++;
		}
	return (ktr_requestpool);
}

static struct ktr_request *
ktr_getrequest(int type)
{
	struct ktr_request *req;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int pm;

	td->td_pflags |= TDP_INKTRACE;
	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		td->td_pflags &= ~TDP_INKTRACE;
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		KASSERT(p->p_tracevp != NULL, ("ktrace: no trace vnode"));
		KASSERT(p->p_tracecred != NULL, ("ktrace: no trace cred"));
		req->ktr_vp = p->p_tracevp;
		VREF(p->p_tracevp);
		req->ktr_cred = crhold(p->p_tracecred);
		mtx_unlock(&ktrace_mtx);
		microtime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_pid = p->p_pid;
		bcopy(p->p_comm, req->ktr_header.ktr_comm, MAXCOMLEN + 1);
		req->ktr_header.ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
		td->td_pflags &= ~TDP_INKTRACE;
	}
	return (req);
}

static void
ktr_submitrequest(struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&ktr_todo, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
	sema_post(&ktrace_sema);
	curthread->td_pflags &= ~TDP_INKTRACE;
}

static void
ktr_freerequest(struct ktr_request *req)
{

	crfree(req->ktr_cred);
	if (req->ktr_vp != NULL) {
		mtx_lock(&Giant);
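		/*
		 * vrele() can enter the VFS layer, which is still
		 * Giant-locked at this point, so the vnode reference
		 * is dropped with Giant held.
		 */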
		vrele(req->ktr_vp);
		mtx_unlock(&Giant);
	}
	if (req->ktr_header.ktr_buffer != NULL)
		free(req->ktr_header.ktr_buffer, M_KTRACE);
	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
}

static void
ktr_loop(void *dummy)
{
	struct ktr_request *req;
	struct thread *td;
	struct ucred *cred;

	/* Only cache these values once. */
	td = curthread;
	cred = td->td_ucred;
	for (;;) {
		sema_wait(&ktrace_sema);
		mtx_lock(&ktrace_mtx);
		req = STAILQ_FIRST(&ktr_todo);
		STAILQ_REMOVE_HEAD(&ktr_todo, ktr_list);
		KASSERT(req != NULL, ("got a NULL request"));
		mtx_unlock(&ktrace_mtx);
		/*
		 * It is not enough just to pass the cached cred
		 * to the VOP's in ktr_writerequest().  Some VFS
		 * operations use curthread->td_ucred, so we need
		 * to modify our thread's credentials as well.
		 * Evil.
		 */
		td->td_ucred = req->ktr_cred;
		ktr_writerequest(req);
		td->td_ucred = cred;
		ktr_freerequest(req);
	}
}

/*
 * MPSAFE
 */
void
ktrsyscall(code, narg, args)
	int code, narg;
	register_t args[];
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_header.ktr_buffer = buf;
	}
	ktr_submitrequest(req);
}

/*
 * MPSAFE
 */
void
ktrsysret(code, error, retval)
	int code, error;
	register_t retval;
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = retval;		/* what about val2 ? */
	ktr_submitrequest(req);
}

void
ktrnamei(path)
	char *path;
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_header.ktr_buffer = buf;
	}
	ktr_submitrequest(req);
}

/*
 * The uio may not remain valid once this function returns, so the
 * request cannot be handed off to the ktrace thread by reference.
 * Instead, up to ktr_geniosize bytes of the I/O data are copied out
 * of the uio into a private buffer and attached to the request, which
 * is then queued for the ktrace thread like any other event; this
 * also keeps the relative order of records in the trace file correct.
 */
void
ktrgenio(fd, rw, uio, error)
	int fd;
	enum uio_rw rw;
	struct uio *uio;
	int error;
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error)
		return;
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = imin(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	if (uiomove(buf, datalen, uio)) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_header.ktr_buffer = buf;
	ktr_submitrequest(req);
}

void
ktrpsig(sig, action, mask, code)
	int sig;
	sig_t action;
	sigset_t *mask;
	int code;
{
	struct ktr_request *req;
	struct ktr_psig *kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_submitrequest(req);
}

void
ktrcsw(out, user)
	int out, user;
{
	struct ktr_request *req;
	struct ktr_csw *kc;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	ktr_submitrequest(req);
}
#endif /* KTRACE */

/* Interface and common routines */

/*
 * ktrace system call
 *
 * MPSAFE
 */
#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
ktrace(td, uap)
	struct thread *td;
	register struct ktrace_args *uap;
{
#ifdef KTRACE
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int flags, error = 0;
	struct nameidata nd;
	struct ucred *cred;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	td->td_pflags |= TDP_INKTRACE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname, td);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		mtx_lock(&Giant);
		error = vn_open(&nd, &flags, 0, -1);
		if (error) {
			mtx_unlock(&Giant);
			td->td_pflags &= ~TDP_INKTRACE;
			return (error);
		}
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0, td);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			mtx_unlock(&Giant);
			td->td_pflags &= ~TDP_INKTRACE;
			return (EACCES);
		}
		mtx_unlock(&Giant);
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			if (p->p_tracevp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					cred = p->p_tracecred;
					p->p_tracecred = NULL;
					p->p_tracevp = NULL;
					p->p_traceflag = 0;
					mtx_unlock(&ktrace_mtx);
					PROC_UNLOCK(p);
					mtx_lock(&Giant);
					(void) vn_close(vp, FREAD|FWRITE,
					    cred, td);
					mtx_unlock(&Giant);
					crfree(cred);
				} else {
					PROC_UNLOCK(p);
					error = EPERM;
				}
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		goto done;
	}
	/*
	 * do it
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * ktrops() may call vrele().  Lock pg_members
		 * via the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		LIST_FOREACH(p, &pg->pg_members, p_pglist)
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, vp);
			else
				ret |= ktrops(td, p, ops, facs, vp);
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * The slock of the proctree lock will keep this process
		 * from going away, so unlocking the proc here is ok.
		 */
		PROC_UNLOCK(p);
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, vp);
		else
			ret |= ktrops(td, p, ops, facs, vp);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL) {
		mtx_lock(&Giant);
		(void) vn_close(vp, FWRITE, td->td_ucred, td);
		mtx_unlock(&Giant);
	}
	td->td_pflags &= ~TDP_INKTRACE;
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/*
 * utrace system call
 *
 * MPSAFE
 */
/* ARGSUSED */
int
utrace(td, uap)
	struct thread *td;
	register struct utrace_args *uap;
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (0);
	}
	req->ktr_header.ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

#ifdef KTRACE
static int
ktrops(td, p, ops, facs, vp)
	struct thread *td;
	struct proc *p;
	int ops, facs;
	struct vnode *vp;
{
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;

	PROC_LOCK(p);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_tracevp != vp) {
			/*
			 * if trace file already in use, relinquish below
			 */
			tracevp = p->p_tracevp;
			VREF(vp);
			p->p_tracevp = vp;
		}
		if (p->p_tracecred != td->td_ucred) {
			tracecred = p->p_tracecred;
			p->p_tracecred = crhold(td->td_ucred);
		}
		p->p_traceflag |= facs;
		if (td->td_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
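		/*
		 * Strip the requested facilities; if none remain
		 * active, stop tracing this process completely and
		 * let the vnode and credential references be released
		 * below, after the locks have been dropped.
		 */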
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			tracevp = p->p_tracevp;
			p->p_tracevp = NULL;
			tracecred = p->p_tracecred;
			p->p_tracecred = NULL;
		}
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (tracevp != NULL) {
		mtx_lock(&Giant);
		vrele(tracevp);
		mtx_unlock(&Giant);
	}
	if (tracecred != NULL)
		crfree(tracecred);

	return (1);
}

static int
ktrsetchildren(td, top, ops, facs, vp)
	struct thread *td;
	struct proc *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

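/*
 * Write a queued request out to its trace vnode.  This is called only
 * from the ktrace worker thread, which has already installed the
 * request's credentials in curthread.  If the write fails, tracing to
 * this vnode is disabled for every process and pending request that
 * still references it.
 */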
static void
ktr_writerequest(struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct thread *td;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error;

	vp = req->ktr_vp;
	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.
	 */
	if (vp == NULL)
		return;
	kth = &req->ktr_header;
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	cred = req->ktr_cred;
	td = curthread;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(kth->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = kth->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}
	mtx_lock(&Giant);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	(void)VOP_LEASE(vp, td, cred, LEASE_WRITE);
#ifdef MAC
	error = mac_check_vnode_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	mtx_unlock(&Giant);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.  We defer
	 * all the vrele()'s on the vnode until after we are finished walking
	 * the various lists to avoid needlessly holding locks.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 0;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 * we really do this?  Other processes might have suitable
	 * credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			p->p_tracevp = NULL;
			p->p_traceflag = 0;
			cred = p->p_tracecred;
			p->p_tracecred = NULL;
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);
	/*
	 * Second, clear this vnode from any pending requests.
	 */
	mtx_lock(&ktrace_mtx);
	STAILQ_FOREACH(req, &ktr_todo, ktr_list) {
		if (req->ktr_vp == vp) {
			req->ktr_vp = NULL;
			vrele_count++;
		}
	}
	mtx_unlock(&ktrace_mtx);
	mtx_lock(&Giant);
	while (vrele_count-- > 0)
		vrele(vp);
	mtx_unlock(&Giant);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(td, targetp)
	struct thread *td;
	struct proc *targetp;
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    suser_cred(td->td_ucred, PRISON_ROOT))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */
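
/*
 * Usage sketch (userland, hypothetical file name and pid): trace the
 * system calls and name lookups of pid 42 into /tmp/out.ktr, roughly
 * what ktrace(1) arranges via the ktrace(2) system call implemented
 * above:
 *
 *	#include <sys/ktrace.h>
 *
 *	ktrace("/tmp/out.ktr", KTROP_SET,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI, 42);
 */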