/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 * $FreeBSD$
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE
static struct ktr_header *ktrgetheader __P((int type));
static void ktrwrite __P((struct vnode *, struct ktr_header *, struct uio *));
static int ktrcanset __P((struct proc *,struct proc *));
static int ktrsetchildren __P((struct proc *,struct proc *,int,int,struct vnode *));
static int ktrops __P((struct proc *,struct proc *,int,int,struct vnode *));


static struct ktr_header *
ktrgetheader(type)
	int type;
{
	register struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
		M_KTRACE, M_WAITOK);
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN + 1);
	return (kth);
}

void
ktrsyscall(vp, code, narg, args)
	struct vnode *vp;
	int code, narg;
	register_t args[];
{
	struct ktr_header *kth;
	struct ktr_syscall *ktp;
	register int len = offsetof(struct ktr_syscall, ktr_args) +
	    (narg * sizeof(register_t));
	struct proc *p = curproc;	/* XXX */
	register_t *argp;
	int i;

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSCALL);
	MALLOC(ktp, struct ktr_syscall *, len, M_KTRACE, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	argp = &ktp->ktr_args[0];
	for (i = 0; i < narg; i++)
		*argp++ = args[i];
	kth->ktr_buffer = (caddr_t)ktp;
	kth->ktr_len = len;
	ktrwrite(vp, kth, NULL);
	FREE(ktp, M_KTRACE);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrsysret(vp, code, error, retval)
	struct vnode *vp;
	int code, error;
	register_t retval;
{
	struct ktr_header *kth;
	struct ktr_sysret ktp;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth->ktr_buffer = (caddr_t)&ktp;
	kth->ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(vp, kth, NULL);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrnamei(vp, path)
	struct vnode *vp;
	char *path;
{
	struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_NAMEI);
	kth->ktr_len = strlen(path);
	kth->ktr_buffer = path;

	ktrwrite(vp, kth, NULL);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrgenio(vp, fd, rw, uio, error)
	struct vnode *vp;
	int fd;
	enum uio_rw rw;
	struct uio *uio;
	int error;
{
	struct ktr_header *kth;
	struct ktr_genio ktg;
	struct proc *p = curproc;	/* XXX */

	if (error)
		return;
	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_GENIO);
	ktg.ktr_fd = fd;
	ktg.ktr_rw = rw;
	kth->ktr_buffer = (caddr_t)&ktg;
	kth->ktr_len = sizeof(struct ktr_genio);
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;

	ktrwrite(vp, kth, uio);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrpsig(vp, sig, action, mask, code)
	struct vnode *vp;
	int sig;
	sig_t action;
	sigset_t *mask;
	int code;
{
	struct ktr_header *kth;
	struct ktr_psig kp;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = *mask;
	kp.code = code;
	kth->ktr_buffer = (caddr_t)&kp;
	kth->ktr_len = sizeof (struct ktr_psig);

	ktrwrite(vp, kth, NULL);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrcsw(vp, out, user)
	struct vnode *vp;
	int out, user;
{
	struct ktr_header *kth;
	struct ktr_csw kc;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_CSW);
	kc.out = out;
	kc.user = user;
	kth->ktr_buffer = (caddr_t)&kc;
	kth->ktr_len = sizeof (struct ktr_csw);

	ktrwrite(vp, kth, NULL);
	FREE(kth, M_KTRACE);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}
#endif

/* Interface and common routines */

/*
 * ktrace system call
 */
#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
ktrace(curp, uap)
	struct proc *curp;
	register struct ktrace_args *uap;
{
#ifdef KTRACE
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int flags, error = 0;
	struct nameidata nd;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
260 */ 261 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname, curp); 262 flags = FREAD | FWRITE | O_NOFOLLOW; 263 error = vn_open(&nd, &flags, 0); 264 if (error) { 265 curp->p_traceflag &= ~KTRFAC_ACTIVE; 266 return (error); 267 } 268 NDFREE(&nd, NDF_ONLY_PNBUF); 269 vp = nd.ni_vp; 270 VOP_UNLOCK(vp, 0, curp); 271 if (vp->v_type != VREG) { 272 (void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp); 273 curp->p_traceflag &= ~KTRFAC_ACTIVE; 274 return (EACCES); 275 } 276 } 277 /* 278 * Clear all uses of the tracefile 279 */ 280 if (ops == KTROP_CLEARFILE) { 281 ALLPROC_LOCK(AP_SHARED); 282 LIST_FOREACH(p, &allproc, p_list) { 283 if (p->p_tracep == vp) { 284 if (ktrcanset(curp, p)) { 285 p->p_tracep = NULL; 286 p->p_traceflag = 0; 287 (void) vn_close(vp, FREAD|FWRITE, 288 p->p_ucred, p); 289 } else 290 error = EPERM; 291 } 292 } 293 ALLPROC_LOCK(AP_RELEASE); 294 goto done; 295 } 296 /* 297 * need something to (un)trace (XXX - why is this here?) 298 */ 299 if (!facs) { 300 error = EINVAL; 301 goto done; 302 } 303 /* 304 * do it 305 */ 306 if (uap->pid < 0) { 307 /* 308 * by process group 309 */ 310 pg = pgfind(-uap->pid); 311 if (pg == NULL) { 312 error = ESRCH; 313 goto done; 314 } 315 LIST_FOREACH(p, &pg->pg_members, p_pglist) 316 if (descend) 317 ret |= ktrsetchildren(curp, p, ops, facs, vp); 318 else 319 ret |= ktrops(curp, p, ops, facs, vp); 320 } else { 321 /* 322 * by pid 323 */ 324 p = pfind(uap->pid); 325 if (p == NULL) { 326 error = ESRCH; 327 goto done; 328 } 329 if (descend) 330 ret |= ktrsetchildren(curp, p, ops, facs, vp); 331 else 332 ret |= ktrops(curp, p, ops, facs, vp); 333 } 334 if (!ret) 335 error = EPERM; 336 done: 337 if (vp != NULL) 338 (void) vn_close(vp, FWRITE, curp->p_ucred, curp); 339 curp->p_traceflag &= ~KTRFAC_ACTIVE; 340 return (error); 341 #else 342 return ENOSYS; 343 #endif 344 } 345 346 /* 347 * utrace system call 348 */ 349 /* ARGSUSED */ 350 int 351 utrace(curp, uap) 352 struct proc *curp; 353 register struct utrace_args *uap; 354 { 355 #ifdef KTRACE 356 struct ktr_header *kth; 357 struct proc *p = curproc; /* XXX */ 358 register caddr_t cp; 359 360 if (!KTRPOINT(p, KTR_USER)) 361 return (0); 362 if (uap->len > KTR_USER_MAXLEN) 363 return (EINVAL); 364 p->p_traceflag |= KTRFAC_ACTIVE; 365 kth = ktrgetheader(KTR_USER); 366 MALLOC(cp, caddr_t, uap->len, M_KTRACE, M_WAITOK); 367 if (!copyin(uap->addr, cp, uap->len)) { 368 kth->ktr_buffer = cp; 369 kth->ktr_len = uap->len; 370 ktrwrite(p->p_tracep, kth, NULL); 371 } 372 FREE(kth, M_KTRACE); 373 FREE(cp, M_KTRACE); 374 p->p_traceflag &= ~KTRFAC_ACTIVE; 375 376 return (0); 377 #else 378 return (ENOSYS); 379 #endif 380 } 381 382 #ifdef KTRACE 383 static int 384 ktrops(curp, p, ops, facs, vp) 385 struct proc *p, *curp; 386 int ops, facs; 387 struct vnode *vp; 388 { 389 390 if (!ktrcanset(curp, p)) 391 return (0); 392 if (ops == KTROP_SET) { 393 if (p->p_tracep != vp) { 394 /* 395 * if trace file already in use, relinquish 396 */ 397 if (p->p_tracep != NULL) 398 vrele(p->p_tracep); 399 VREF(vp); 400 p->p_tracep = vp; 401 } 402 p->p_traceflag |= facs; 403 if (curp->p_ucred->cr_uid == 0) 404 p->p_traceflag |= KTRFAC_ROOT; 405 } else { 406 /* KTROP_CLEAR */ 407 if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) { 408 /* no more tracing */ 409 p->p_traceflag = 0; 410 if (p->p_tracep != NULL) { 411 vrele(p->p_tracep); 412 p->p_tracep = NULL; 413 } 414 } 415 } 416 417 return (1); 418 } 419 420 static int 421 ktrsetchildren(curp, top, ops, facs, vp) 422 struct proc *curp, *top; 423 int ops, facs; 424 struct vnode 
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	PROCTREE_LOCK(PT_SHARED);
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top) {
				PROCTREE_LOCK(PT_RELEASE);
				return (ret);
			}
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

static void
ktrwrite(vp, kth, uio)
	struct vnode *vp;
	register struct ktr_header *kth;
	struct uio *uio;
{
	struct uio auio;
	struct iovec aiov[2];
	struct proc *p = curproc;	/* XXX */
	struct mount *mp;
	int error;

	if (vp == NULL)
		return;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = curproc;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buffer;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
		if (uio != NULL)
			kth->ktr_len += uio->uio_resid;
	}
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	(void)VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, p->p_ucred);
	if (error == 0 && uio != NULL) {
		(void)VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
		error = VOP_WRITE(vp, uio, IO_UNIT | IO_APPEND, p->p_ucred);
	}
	VOP_UNLOCK(vp, 0, p);
	vn_finished_write(mp);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	ALLPROC_LOCK(AP_SHARED);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_tracep == vp) {
			p->p_tracep = NULL;
			p->p_traceflag = 0;
			vrele(vp);
		}
	}
	ALLPROC_LOCK(AP_RELEASE);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * XXX: These checks are stronger than for ptrace()
 *
 * TODO: check groups.  use caller effective gid.
 */
static int
ktrcanset(callp, targetp)
	struct proc *callp, *targetp;
{
	register struct pcred *caller = callp->p_cred;
	register struct pcred *target = targetp->p_cred;

	if (!PRISON_CHECK(callp, targetp))
		return (0);
	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
	     target->p_ruid == target->p_svuid &&
	     caller->p_rgid == target->p_rgid &&	/* XXX */
	     target->p_rgid == target->p_svgid &&
	     (targetp->p_traceflag & KTRFAC_ROOT) == 0) ||
	     caller->pc_ucred->cr_uid == 0)
		return (1);

	return (0);
}

#endif /* KTRACE */
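
/*
 * Illustrative userland sketch (kept here only as a comment, not kernel
 * code): how a program might drive the ktrace(2) and utrace(2) entry
 * points implemented above.  The KTROP_* and KTRFAC_* constants come from
 * <sys/ktrace.h>; the trace file name "ktrace.out" and the message text
 * are arbitrary example values, so treat this as an assumption-laden
 * sketch rather than a reference usage.
 *
 *	#include <sys/types.h>
 *	#include <sys/ktrace.h>
 *	#include <unistd.h>
 *
 *	// Start tracing syscalls, returns and name lookups for this
 *	// process into ./ktrace.out.
 *	ktrace("ktrace.out", KTROP_SET,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI, getpid());
 *
 *	// Append one application-defined KTR_USER record to the trace.
 *	utrace("hello from userland", sizeof("hello from userland"));
 *
 *	// Stop tracing; no file argument is needed for KTROP_CLEAR.
 *	ktrace(NULL, KTROP_CLEAR,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI, getpid());
 */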