/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

#ifndef KTRACE_REQUEST_POOL
#define KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct	ktr_header ktr_header;
	struct	ucred *ktr_cred;
	struct	vnode *ktr_vp;
	union {
		struct	ktr_syscall ktr_syscall;
		struct	ktr_sysret ktr_sysret;
		struct	ktr_genio ktr_genio;
		struct	ktr_psig ktr_psig;
		struct	ktr_csw ktr_csw;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};

static int data_lengths[] = {
	0,					/* none */
	offsetof(struct ktr_syscall, ktr_args),	/* KTR_SYSCALL */
	sizeof(struct ktr_sysret),		/* KTR_SYSRET */
	0,					/* KTR_NAMEI */
	sizeof(struct ktr_genio),		/* KTR_GENIO */
	sizeof(struct ktr_psig),		/* KTR_PSIG */
	sizeof(struct ktr_csw),			/* KTR_CSW */
	0					/* KTR_USER */
};

static STAILQ_HEAD(, ktr_request) ktr_todo;
static STAILQ_HEAD(, ktr_request) ktr_free;

SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

static u_int ktr_geniosize = PAGE_SIZE;
TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize);
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize,
    0, "Maximum size of genio event payload");

static int print_message = 1;
struct mtx ktrace_mtx;
static struct cv ktrace_cv;

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int newsize);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct ktr_request *req);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_loop(void *dummy);
static void ktr_writerequest(struct ktr_request *req);
static int ktrcanset(struct thread *,struct proc *);
static int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *);
static int ktrops(struct thread *,struct proc *,int,int,struct vnode *);

static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	cv_init(&ktrace_cv, "ktrace");
	STAILQ_INIT(&ktr_todo);
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
	kthread_create(ktr_loop, NULL, NULL, RFHIGHPID, 0, "ktrace");
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/*
	 * Handle easy read-only case first to avoid warnings from GCC.
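	 * A read-only request (req->newptr == NULL) just reports the
	 * current pool size and never calls ktrace_resize_pool().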
	 */
	if (!req->newptr) {
		mtx_lock(&ktrace_mtx);
		oldsize = ktr_requestpool;
		mtx_unlock(&ktrace_mtx);
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	td->td_pflags |= TDP_INKTRACE;
	mtx_lock(&ktrace_mtx);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(wantsize);
	mtx_unlock(&ktrace_mtx);
	td->td_pflags &= ~TDP_INKTRACE;
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
    &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", "");

static u_int
ktrace_resize_pool(u_int newsize)
{
	struct ktr_request *req;
	int bound;

	mtx_assert(&ktrace_mtx, MA_OWNED);
	print_message = 1;
	bound = newsize - ktr_requestpool;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0)
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				return (ktr_requestpool);
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			mtx_unlock(&ktrace_mtx);
			free(req, M_KTRACE);
			mtx_lock(&ktrace_mtx);
		}
	else
		/* Grow pool up to newsize. */
		while (bound-- > 0) {
			mtx_unlock(&ktrace_mtx);
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK);
			mtx_lock(&ktrace_mtx);
			STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
			ktr_requestpool++;
		}
	return (ktr_requestpool);
}

static struct ktr_request *
ktr_getrequest(int type)
{
	struct ktr_request *req;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int pm;

	td->td_pflags |= TDP_INKTRACE;
	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		td->td_pflags &= ~TDP_INKTRACE;
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		KASSERT(p->p_tracevp != NULL, ("ktrace: no trace vnode"));
		KASSERT(p->p_tracecred != NULL, ("ktrace: no trace cred"));
		req->ktr_vp = p->p_tracevp;
		VREF(p->p_tracevp);
		req->ktr_cred = crhold(p->p_tracecred);
		mtx_unlock(&ktrace_mtx);
		microtime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_pid = p->p_pid;
		bcopy(p->p_comm, req->ktr_header.ktr_comm, MAXCOMLEN + 1);
		req->ktr_header.ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
		td->td_pflags &= ~TDP_INKTRACE;
	}
	return (req);
}

static void
ktr_submitrequest(struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&ktr_todo, req, ktr_list);
	cv_signal(&ktrace_cv);
	mtx_unlock(&ktrace_mtx);
	curthread->td_pflags &= ~TDP_INKTRACE;
}

static void
ktr_freerequest(struct ktr_request *req)
{

	crfree(req->ktr_cred);
	if (req->ktr_vp != NULL) {
		mtx_lock(&Giant);
		vrele(req->ktr_vp);
		mtx_unlock(&Giant);
	}
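	/*
	 * Any auxiliary payload (syscall arguments, namei path, I/O or
	 * utrace data) was allocated separately and is freed with the
	 * request.
	 */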
	if (req->ktr_header.ktr_buffer != NULL)
		free(req->ktr_header.ktr_buffer, M_KTRACE);
	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
}

static void
ktr_loop(void *dummy)
{
	struct ktr_request *req;
	struct thread *td;
	struct ucred *cred;

	/* Only cache these values once. */
	td = curthread;
	cred = td->td_ucred;
	for (;;) {
		mtx_lock(&ktrace_mtx);
		while (STAILQ_EMPTY(&ktr_todo))
			cv_wait(&ktrace_cv, &ktrace_mtx);
		req = STAILQ_FIRST(&ktr_todo);
		STAILQ_REMOVE_HEAD(&ktr_todo, ktr_list);
		KASSERT(req != NULL, ("got a NULL request"));
		mtx_unlock(&ktrace_mtx);
		/*
		 * It is not enough just to pass the cached cred
		 * to the VOP's in ktr_writerequest().  Some VFS
		 * operations use curthread->td_ucred, so we need
		 * to modify our thread's credentials as well.
		 * Evil.
		 */
		td->td_ucred = req->ktr_cred;
		ktr_writerequest(req);
		td->td_ucred = cred;
		ktr_freerequest(req);
	}
}

/*
 * MPSAFE
 */
void
ktrsyscall(code, narg, args)
	int code, narg;
	register_t args[];
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_header.ktr_buffer = buf;
	}
	ktr_submitrequest(req);
}

/*
 * MPSAFE
 */
void
ktrsysret(code, error, retval)
	int code, error;
	register_t retval;
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = retval;		/* what about val2 ? */
	ktr_submitrequest(req);
}

void
ktrnamei(path)
	char *path;
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_header.ktr_buffer = buf;
	}
	ktr_submitrequest(req);
}

/*
 * Since the uio may not stay valid, we cannot hand off this request to
 * the thread and need to process it synchronously.  However, we wish to
 * keep the relative order of records in a trace file correct, so we
 * do put this request on the queue (if it isn't empty) and then block.
 * The ktrace thread wakes us back up when it is time for this event to
 * be posted and blocks until we have completed writing out the event
 * and woken it back up.
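 *
 * (Note: the code below sidesteps this by copying the payload into a
 * private buffer, capped by the kern.ktrace.genio_size sysctl, so the
 * request can be queued like any other.)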
407 */ 408 void 409 ktrgenio(fd, rw, uio, error) 410 int fd; 411 enum uio_rw rw; 412 struct uio *uio; 413 int error; 414 { 415 struct ktr_request *req; 416 struct ktr_genio *ktg; 417 int datalen; 418 char *buf; 419 420 if (error) 421 return; 422 uio->uio_offset = 0; 423 uio->uio_rw = UIO_WRITE; 424 datalen = imin(uio->uio_resid, ktr_geniosize); 425 buf = malloc(datalen, M_KTRACE, M_WAITOK); 426 if (uiomove(buf, datalen, uio)) { 427 free(buf, M_KTRACE); 428 return; 429 } 430 req = ktr_getrequest(KTR_GENIO); 431 if (req == NULL) { 432 free(buf, M_KTRACE); 433 return; 434 } 435 ktg = &req->ktr_data.ktr_genio; 436 ktg->ktr_fd = fd; 437 ktg->ktr_rw = rw; 438 req->ktr_header.ktr_len = datalen; 439 req->ktr_header.ktr_buffer = buf; 440 ktr_submitrequest(req); 441 } 442 443 void 444 ktrpsig(sig, action, mask, code) 445 int sig; 446 sig_t action; 447 sigset_t *mask; 448 int code; 449 { 450 struct ktr_request *req; 451 struct ktr_psig *kp; 452 453 req = ktr_getrequest(KTR_PSIG); 454 if (req == NULL) 455 return; 456 kp = &req->ktr_data.ktr_psig; 457 kp->signo = (char)sig; 458 kp->action = action; 459 kp->mask = *mask; 460 kp->code = code; 461 ktr_submitrequest(req); 462 } 463 464 void 465 ktrcsw(out, user) 466 int out, user; 467 { 468 struct ktr_request *req; 469 struct ktr_csw *kc; 470 471 req = ktr_getrequest(KTR_CSW); 472 if (req == NULL) 473 return; 474 kc = &req->ktr_data.ktr_csw; 475 kc->out = out; 476 kc->user = user; 477 ktr_submitrequest(req); 478 } 479 #endif /* KTRACE */ 480 481 /* Interface and common routines */ 482 483 /* 484 * ktrace system call 485 * 486 * MPSAFE 487 */ 488 #ifndef _SYS_SYSPROTO_H_ 489 struct ktrace_args { 490 char *fname; 491 int ops; 492 int facs; 493 int pid; 494 }; 495 #endif 496 /* ARGSUSED */ 497 int 498 ktrace(td, uap) 499 struct thread *td; 500 register struct ktrace_args *uap; 501 { 502 #ifdef KTRACE 503 register struct vnode *vp = NULL; 504 register struct proc *p; 505 struct pgrp *pg; 506 int facs = uap->facs & ~KTRFAC_ROOT; 507 int ops = KTROP(uap->ops); 508 int descend = uap->ops & KTRFLAG_DESCEND; 509 int ret = 0; 510 int flags, error = 0; 511 struct nameidata nd; 512 struct ucred *cred; 513 514 /* 515 * Need something to (un)trace. 516 */ 517 if (ops != KTROP_CLEARFILE && facs == 0) 518 return (EINVAL); 519 520 td->td_pflags |= TDP_INKTRACE; 521 if (ops != KTROP_CLEAR) { 522 /* 523 * an operation which requires a file argument. 524 */ 525 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname, td); 526 flags = FREAD | FWRITE | O_NOFOLLOW; 527 mtx_lock(&Giant); 528 error = vn_open(&nd, &flags, 0, -1); 529 if (error) { 530 mtx_unlock(&Giant); 531 td->td_pflags &= ~TDP_INKTRACE; 532 return (error); 533 } 534 NDFREE(&nd, NDF_ONLY_PNBUF); 535 vp = nd.ni_vp; 536 VOP_UNLOCK(vp, 0, td); 537 if (vp->v_type != VREG) { 538 (void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td); 539 mtx_unlock(&Giant); 540 td->td_pflags &= ~TDP_INKTRACE; 541 return (EACCES); 542 } 543 mtx_unlock(&Giant); 544 } 545 /* 546 * Clear all uses of the tracefile. 
547 */ 548 if (ops == KTROP_CLEARFILE) { 549 sx_slock(&allproc_lock); 550 LIST_FOREACH(p, &allproc, p_list) { 551 PROC_LOCK(p); 552 if (p->p_tracevp == vp) { 553 if (ktrcanset(td, p)) { 554 mtx_lock(&ktrace_mtx); 555 cred = p->p_tracecred; 556 p->p_tracecred = NULL; 557 p->p_tracevp = NULL; 558 p->p_traceflag = 0; 559 mtx_unlock(&ktrace_mtx); 560 PROC_UNLOCK(p); 561 mtx_lock(&Giant); 562 (void) vn_close(vp, FREAD|FWRITE, 563 cred, td); 564 mtx_unlock(&Giant); 565 crfree(cred); 566 } else { 567 PROC_UNLOCK(p); 568 error = EPERM; 569 } 570 } else 571 PROC_UNLOCK(p); 572 } 573 sx_sunlock(&allproc_lock); 574 goto done; 575 } 576 /* 577 * do it 578 */ 579 sx_slock(&proctree_lock); 580 if (uap->pid < 0) { 581 /* 582 * by process group 583 */ 584 pg = pgfind(-uap->pid); 585 if (pg == NULL) { 586 sx_sunlock(&proctree_lock); 587 error = ESRCH; 588 goto done; 589 } 590 /* 591 * ktrops() may call vrele(). Lock pg_members 592 * by the proctree_lock rather than pg_mtx. 593 */ 594 PGRP_UNLOCK(pg); 595 LIST_FOREACH(p, &pg->pg_members, p_pglist) 596 if (descend) 597 ret |= ktrsetchildren(td, p, ops, facs, vp); 598 else 599 ret |= ktrops(td, p, ops, facs, vp); 600 } else { 601 /* 602 * by pid 603 */ 604 p = pfind(uap->pid); 605 if (p == NULL) { 606 sx_sunlock(&proctree_lock); 607 error = ESRCH; 608 goto done; 609 } 610 /* 611 * The slock of the proctree lock will keep this process 612 * from going away, so unlocking the proc here is ok. 613 */ 614 PROC_UNLOCK(p); 615 if (descend) 616 ret |= ktrsetchildren(td, p, ops, facs, vp); 617 else 618 ret |= ktrops(td, p, ops, facs, vp); 619 } 620 sx_sunlock(&proctree_lock); 621 if (!ret) 622 error = EPERM; 623 done: 624 if (vp != NULL) { 625 mtx_lock(&Giant); 626 (void) vn_close(vp, FWRITE, td->td_ucred, td); 627 mtx_unlock(&Giant); 628 } 629 td->td_pflags &= ~TDP_INKTRACE; 630 return (error); 631 #else /* !KTRACE */ 632 return (ENOSYS); 633 #endif /* KTRACE */ 634 } 635 636 /* 637 * utrace system call 638 * 639 * MPSAFE 640 */ 641 /* ARGSUSED */ 642 int 643 utrace(td, uap) 644 struct thread *td; 645 register struct utrace_args *uap; 646 { 647 648 #ifdef KTRACE 649 struct ktr_request *req; 650 void *cp; 651 int error; 652 653 if (!KTRPOINT(td, KTR_USER)) 654 return (0); 655 if (uap->len > KTR_USER_MAXLEN) 656 return (EINVAL); 657 cp = malloc(uap->len, M_KTRACE, M_WAITOK); 658 error = copyin(uap->addr, cp, uap->len); 659 if (error) { 660 free(cp, M_KTRACE); 661 return (error); 662 } 663 req = ktr_getrequest(KTR_USER); 664 if (req == NULL) { 665 free(cp, M_KTRACE); 666 return (ENOMEM); 667 } 668 req->ktr_header.ktr_buffer = cp; 669 req->ktr_header.ktr_len = uap->len; 670 ktr_submitrequest(req); 671 return (0); 672 #else /* !KTRACE */ 673 return (ENOSYS); 674 #endif /* KTRACE */ 675 } 676 677 #ifdef KTRACE 678 static int 679 ktrops(td, p, ops, facs, vp) 680 struct thread *td; 681 struct proc *p; 682 int ops, facs; 683 struct vnode *vp; 684 { 685 struct vnode *tracevp = NULL; 686 struct ucred *tracecred = NULL; 687 688 PROC_LOCK(p); 689 if (!ktrcanset(td, p)) { 690 PROC_UNLOCK(p); 691 return (0); 692 } 693 mtx_lock(&ktrace_mtx); 694 if (ops == KTROP_SET) { 695 if (p->p_tracevp != vp) { 696 /* 697 * if trace file already in use, relinquish below 698 */ 699 tracevp = p->p_tracevp; 700 VREF(vp); 701 p->p_tracevp = vp; 702 } 703 if (p->p_tracecred != td->td_ucred) { 704 tracecred = p->p_tracecred; 705 p->p_tracecred = crhold(td->td_ucred); 706 } 707 p->p_traceflag |= facs; 708 if (td->td_ucred->cr_uid == 0) 709 p->p_traceflag |= KTRFAC_ROOT; 710 } else { 711 /* KTROP_CLEAR 
		 */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			tracevp = p->p_tracevp;
			p->p_tracevp = NULL;
			tracecred = p->p_tracecred;
			p->p_tracecred = NULL;
		}
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (tracevp != NULL) {
		mtx_lock(&Giant);
		vrele(tracevp);
		mtx_unlock(&Giant);
	}
	if (tracecred != NULL)
		crfree(tracecred);

	return (1);
}

static int
ktrsetchildren(td, top, ops, facs, vp)
	struct thread *td;
	struct proc *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

static void
ktr_writerequest(struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct thread *td;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error;

	vp = req->ktr_vp;
	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.
	 */
	if (vp == NULL)
		return;
	kth = &req->ktr_header;
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	cred = req->ktr_cred;
	td = curthread;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(kth->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = kth->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}
	mtx_lock(&Giant);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	(void)VOP_LEASE(vp, td, cred, LEASE_WRITE);
#ifdef MAC
	error = mac_check_vnode_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	mtx_unlock(&Giant);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.  We defer
	 * all the vrele()'s on the vnode until after we are finished walking
	 * the various lists to avoid needlessly holding locks.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 0;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 * we really do this?
	 * Other processes might have suitable
	 * credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			p->p_tracevp = NULL;
			p->p_traceflag = 0;
			cred = p->p_tracecred;
			p->p_tracecred = NULL;
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);
	/*
	 * Second, clear this vnode from any pending requests.
	 */
	mtx_lock(&ktrace_mtx);
	STAILQ_FOREACH(req, &ktr_todo, ktr_list) {
		if (req->ktr_vp == vp) {
			req->ktr_vp = NULL;
			vrele_count++;
		}
	}
	mtx_unlock(&ktrace_mtx);
	mtx_lock(&Giant);
	while (vrele_count-- > 0)
		vrele(vp);
	mtx_unlock(&Giant);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(td, targetp)
	struct thread *td;
	struct proc *targetp;
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    suser_cred(td->td_ucred, PRISON_ROOT))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */