/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 * $FreeBSD$
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE
static struct ktr_header *ktrgetheader __P((int type));
static void ktrwrite __P((struct vnode *, struct ktr_header *, struct uio *));
static int ktrcanset __P((struct proc *, struct proc *));
static int ktrsetchildren __P((struct proc *, struct proc *, int, int, struct vnode *));
static int ktrops __P((struct proc *, struct proc *, int, int, struct vnode *));


static struct ktr_header *
ktrgetheader(type)
	int type;
{
	register struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
		M_KTRACE, M_WAITOK);
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN + 1);
	return (kth);
}

void
ktrsyscall(vp, code, narg, args)
	struct vnode *vp;
	int code, narg;
	register_t args[];
{
	struct ktr_header *kth;
	struct ktr_syscall *ktp;
	register int len = offsetof(struct ktr_syscall, ktr_args) +
	    (narg * sizeof(register_t));
	struct proc *p = curproc;	/* XXX */
	register_t *argp;
	int i;

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSCALL);
	MALLOC(ktp, struct ktr_syscall *, len, M_KTRACE, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	argp = &ktp->ktr_args[0];
	for (i = 0; i < narg; i++)
		*argp++ = args[i];
	kth->ktr_buffer = (caddr_t)ktp;
	kth->ktr_len = len;
	ktrwrite(vp, kth, NULL);
	FREE(ktp, M_KTRACE);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrsysret(vp, code, error, retval)
	struct vnode *vp;
	int code, error;
	register_t retval;
{
	struct ktr_header *kth;
	struct ktr_sysret ktp;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth->ktr_buffer = (caddr_t)&ktp;
	kth->ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(vp, kth, NULL);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrnamei(vp, path)
	struct vnode *vp;
	char *path;
{
	struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_NAMEI);
	kth->ktr_len = strlen(path);
	kth->ktr_buffer = path;

	ktrwrite(vp, kth, NULL);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrgenio(vp, fd, rw, uio, error)
	struct vnode *vp;
	int fd;
	enum uio_rw rw;
	struct uio *uio;
	int error;
{
	struct ktr_header *kth;
	struct ktr_genio ktg;
	struct proc *p = curproc;	/* XXX */

	if (error)
		return;
	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_GENIO);
	ktg.ktr_fd = fd;
	ktg.ktr_rw = rw;
	kth->ktr_buffer = (caddr_t)&ktg;
	kth->ktr_len = sizeof(struct ktr_genio);
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;

	ktrwrite(vp, kth, uio);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrpsig(vp, sig, action, mask, code)
	struct vnode *vp;
	int sig;
	sig_t action;
	sigset_t *mask;
	int code;
{
	struct ktr_header *kth;
	struct ktr_psig kp;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = *mask;
	kp.code = code;
	kth->ktr_buffer = (caddr_t)&kp;
	kth->ktr_len = sizeof (struct ktr_psig);

	ktrwrite(vp, kth, NULL);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrcsw(vp, out, user)
	struct vnode *vp;
	int out, user;
{
	struct ktr_header *kth;
	struct ktr_csw kc;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_CSW);
	kc.out = out;
	kc.user = user;
	kth->ktr_buffer = (caddr_t)&kc;
	kth->ktr_len = sizeof (struct ktr_csw);

	ktrwrite(vp, kth, NULL);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}
#endif

/* Interface and common routines */

/*
 * ktrace system call
 */
#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
ktrace(curp, uap)
	struct proc *curp;
	register struct ktrace_args *uap;
{
#ifdef KTRACE
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int flags, error = 0;
	struct nameidata nd;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
260 */ 261 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname, curp); 262 flags = FREAD | FWRITE | O_NOFOLLOW; 263 error = vn_open(&nd, &flags, 0); 264 if (error) { 265 curp->p_traceflag &= ~KTRFAC_ACTIVE; 266 return (error); 267 } 268 NDFREE(&nd, NDF_ONLY_PNBUF); 269 vp = nd.ni_vp; 270 VOP_UNLOCK(vp, 0, curp); 271 if (vp->v_type != VREG) { 272 (void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp); 273 curp->p_traceflag &= ~KTRFAC_ACTIVE; 274 return (EACCES); 275 } 276 } 277 /* 278 * Clear all uses of the tracefile 279 */ 280 if (ops == KTROP_CLEARFILE) { 281 lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC); 282 LIST_FOREACH(p, &allproc, p_list) { 283 if (p->p_tracep == vp) { 284 if (ktrcanset(curp, p)) { 285 p->p_tracep = NULL; 286 p->p_traceflag = 0; 287 (void) vn_close(vp, FREAD|FWRITE, 288 p->p_ucred, p); 289 } else 290 error = EPERM; 291 } 292 } 293 lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC); 294 goto done; 295 } 296 /* 297 * need something to (un)trace (XXX - why is this here?) 298 */ 299 if (!facs) { 300 error = EINVAL; 301 goto done; 302 } 303 /* 304 * do it 305 */ 306 if (uap->pid < 0) { 307 /* 308 * by process group 309 */ 310 pg = pgfind(-uap->pid); 311 if (pg == NULL) { 312 error = ESRCH; 313 goto done; 314 } 315 LIST_FOREACH(p, &pg->pg_members, p_pglist) 316 if (descend) 317 ret |= ktrsetchildren(curp, p, ops, facs, vp); 318 else 319 ret |= ktrops(curp, p, ops, facs, vp); 320 321 } else { 322 /* 323 * by pid 324 */ 325 p = pfind(uap->pid); 326 if (p == NULL) { 327 error = ESRCH; 328 goto done; 329 } 330 if (descend) 331 ret |= ktrsetchildren(curp, p, ops, facs, vp); 332 else 333 ret |= ktrops(curp, p, ops, facs, vp); 334 } 335 if (!ret) 336 error = EPERM; 337 done: 338 if (vp != NULL) 339 (void) vn_close(vp, FWRITE, curp->p_ucred, curp); 340 curp->p_traceflag &= ~KTRFAC_ACTIVE; 341 return (error); 342 #else 343 return ENOSYS; 344 #endif 345 } 346 347 /* 348 * utrace system call 349 */ 350 /* ARGSUSED */ 351 int 352 utrace(curp, uap) 353 struct proc *curp; 354 register struct utrace_args *uap; 355 { 356 #ifdef KTRACE 357 struct ktr_header *kth; 358 struct proc *p = curproc; /* XXX */ 359 register caddr_t cp; 360 361 if (!KTRPOINT(p, KTR_USER)) 362 return (0); 363 p->p_traceflag |= KTRFAC_ACTIVE; 364 kth = ktrgetheader(KTR_USER); 365 MALLOC(cp, caddr_t, uap->len, M_KTRACE, M_WAITOK); 366 if (!copyin(uap->addr, cp, uap->len)) { 367 kth->ktr_buffer = cp; 368 kth->ktr_len = uap->len; 369 ktrwrite(p->p_tracep, kth, NULL); 370 } 371 FREE(kth, M_KTRACE); 372 FREE(cp, M_KTRACE); 373 p->p_traceflag &= ~KTRFAC_ACTIVE; 374 375 return (0); 376 #else 377 return (ENOSYS); 378 #endif 379 } 380 381 #ifdef KTRACE 382 static int 383 ktrops(curp, p, ops, facs, vp) 384 struct proc *p, *curp; 385 int ops, facs; 386 struct vnode *vp; 387 { 388 389 if (!ktrcanset(curp, p)) 390 return (0); 391 if (ops == KTROP_SET) { 392 if (p->p_tracep != vp) { 393 /* 394 * if trace file already in use, relinquish 395 */ 396 if (p->p_tracep != NULL) 397 vrele(p->p_tracep); 398 VREF(vp); 399 p->p_tracep = vp; 400 } 401 p->p_traceflag |= facs; 402 if (curp->p_ucred->cr_uid == 0) 403 p->p_traceflag |= KTRFAC_ROOT; 404 } else { 405 /* KTROP_CLEAR */ 406 if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) { 407 /* no more tracing */ 408 p->p_traceflag = 0; 409 if (p->p_tracep != NULL) { 410 vrele(p->p_tracep); 411 p->p_tracep = NULL; 412 } 413 } 414 } 415 416 return (1); 417 } 418 419 static int 420 ktrsetchildren(curp, top, ops, facs, vp) 421 struct proc *curp, *top; 422 int ops, facs; 423 struct vnode *vp; 
424 { 425 register struct proc *p; 426 register int ret = 0; 427 428 p = top; 429 for (;;) { 430 ret |= ktrops(curp, p, ops, facs, vp); 431 /* 432 * If this process has children, descend to them next, 433 * otherwise do any siblings, and if done with this level, 434 * follow back up the tree (but not past top). 435 */ 436 if (!LIST_EMPTY(&p->p_children)) 437 p = LIST_FIRST(&p->p_children); 438 else for (;;) { 439 if (p == top) 440 return (ret); 441 if (LIST_NEXT(p, p_sibling)) { 442 p = LIST_NEXT(p, p_sibling); 443 break; 444 } 445 p = p->p_pptr; 446 } 447 } 448 /*NOTREACHED*/ 449 } 450 451 static void 452 ktrwrite(vp, kth, uio) 453 struct vnode *vp; 454 register struct ktr_header *kth; 455 struct uio *uio; 456 { 457 struct uio auio; 458 struct iovec aiov[2]; 459 struct proc *p = curproc; /* XXX */ 460 struct mount *mp; 461 int error; 462 463 if (vp == NULL) 464 return; 465 auio.uio_iov = &aiov[0]; 466 auio.uio_offset = 0; 467 auio.uio_segflg = UIO_SYSSPACE; 468 auio.uio_rw = UIO_WRITE; 469 aiov[0].iov_base = (caddr_t)kth; 470 aiov[0].iov_len = sizeof(struct ktr_header); 471 auio.uio_resid = sizeof(struct ktr_header); 472 auio.uio_iovcnt = 1; 473 auio.uio_procp = curproc; 474 if (kth->ktr_len > 0) { 475 auio.uio_iovcnt++; 476 aiov[1].iov_base = kth->ktr_buffer; 477 aiov[1].iov_len = kth->ktr_len; 478 auio.uio_resid += kth->ktr_len; 479 if (uio != NULL) 480 kth->ktr_len += uio->uio_resid; 481 } 482 vn_start_write(vp, &mp, V_WAIT); 483 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); 484 (void)VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE); 485 error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, p->p_ucred); 486 if (error == 0 && uio != NULL) { 487 (void)VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE); 488 error = VOP_WRITE(vp, uio, IO_UNIT | IO_APPEND, p->p_ucred); 489 } 490 VOP_UNLOCK(vp, 0, p); 491 vn_finished_write(mp); 492 if (!error) 493 return; 494 /* 495 * If error encountered, give up tracing on this vnode. 496 */ 497 log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n", 498 error); 499 lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC); 500 LIST_FOREACH(p, &allproc, p_list) { 501 if (p->p_tracep == vp) { 502 p->p_tracep = NULL; 503 p->p_traceflag = 0; 504 vrele(vp); 505 } 506 } 507 lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC); 508 } 509 510 /* 511 * Return true if caller has permission to set the ktracing state 512 * of target. Essentially, the target can't possess any 513 * more permissions than the caller. KTRFAC_ROOT signifies that 514 * root previously set the tracing status on the target process, and 515 * so, only root may further change it. 516 * 517 * XXX: These checks are stronger than for ptrace() 518 * 519 * TODO: check groups. use caller effective gid. 520 */ 521 static int 522 ktrcanset(callp, targetp) 523 struct proc *callp, *targetp; 524 { 525 register struct pcred *caller = callp->p_cred; 526 register struct pcred *target = targetp->p_cred; 527 528 if (!PRISON_CHECK(callp, targetp)) 529 return (0); 530 if ((caller->pc_ucred->cr_uid == target->p_ruid && 531 target->p_ruid == target->p_svuid && 532 caller->p_rgid == target->p_rgid && /* XXX */ 533 target->p_rgid == target->p_svgid && 534 (targetp->p_traceflag & KTRFAC_ROOT) == 0) || 535 caller->pc_ucred->cr_uid == 0) 536 return (1); 537 538 return (0); 539 } 540 541 #endif /* KTRACE */ 542