/*-
 * Copyright (c) 2000-2004
 *	Poul-Henning Kamp.  All rights reserved.
 * Copyright (c) 1989, 1992-1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kernfs_vnops.c	8.15 (Berkeley) 5/21/95
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43
 *
 * $FreeBSD$
 */

/*
 * TODO:
 *	remove empty directories
 *	mkdir: want it ?
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/ttycom.h>
#include <sys/unistd.h>
#include <sys/vnode.h>

static struct vop_vector devfs_vnodeops;
static struct vop_vector devfs_specops;
static struct fileops devfs_ops_f;

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

#include <security/mac/mac_framework.h>

static MALLOC_DEFINE(M_CDEVPDATA, "DEVFSP", "Metainfo for cdev-fp data");

struct mtx	devfs_de_interlock;
MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF);
struct sx	clone_drain_lock;
SX_SYSINIT(clone_drain_lock, &clone_drain_lock, "clone events drain lock");
struct mtx	cdevpriv_mtx;
MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF);

static int
devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp)
{

	*dswp = devvn_refthread(fp->f_vnode, devp);
	if (*devp != fp->f_data) {
		if (*dswp != NULL)
			dev_relthread(*devp);
		return (ENXIO);
	}
	KASSERT((*devp)->si_refcount > 0,
	    ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp)));
	if (*dswp == NULL)
		return (ENXIO);
	curthread->td_fpop = fp;
	return (0);
}

int
devfs_get_cdevpriv(void **datap)
{
	struct file *fp;
	struct cdev_privdata *p;
	int error;

	fp = curthread->td_fpop;
	if (fp == NULL)
		return (EBADF);
	p = fp->f_cdevpriv;
	if (p != NULL) {
		error = 0;
		*datap = p->cdpd_data;
	} else
		error = ENOENT;
	return (error);
}

int
devfs_set_cdevpriv(void *priv, cdevpriv_dtr_t priv_dtr)
{
	struct file *fp;
	struct cdev_priv *cdp;
	struct cdev_privdata *p;
	int error;

	fp = curthread->td_fpop;
	if (fp == NULL)
		return (ENOENT);
	cdp = cdev2priv((struct cdev *)fp->f_data);
	p = malloc(sizeof(struct cdev_privdata), M_CDEVPDATA, M_WAITOK);
	p->cdpd_data = priv;
	p->cdpd_dtr = priv_dtr;
	p->cdpd_fp = fp;
	mtx_lock(&cdevpriv_mtx);
	if (fp->f_cdevpriv == NULL) {
		LIST_INSERT_HEAD(&cdp->cdp_fdpriv, p, cdpd_list);
		fp->f_cdevpriv = p;
		mtx_unlock(&cdevpriv_mtx);
		error = 0;
	} else {
		mtx_unlock(&cdevpriv_mtx);
		free(p, M_CDEVPDATA);
		error = EBUSY;
	}
	return (error);
}

void
devfs_destroy_cdevpriv(struct cdev_privdata *p)
{

	mtx_assert(&cdevpriv_mtx, MA_OWNED);
	p->cdpd_fp->f_cdevpriv = NULL;
	LIST_REMOVE(p, cdpd_list);
	mtx_unlock(&cdevpriv_mtx);
	(p->cdpd_dtr)(p->cdpd_data);
	free(p, M_CDEVPDATA);
}

void
devfs_fpdrop(struct file *fp)
{
	struct cdev_privdata *p;

	mtx_lock(&cdevpriv_mtx);
	if ((p = fp->f_cdevpriv) == NULL) {
		mtx_unlock(&cdevpriv_mtx);
		return;
	}
	devfs_destroy_cdevpriv(p);
}

void
devfs_clear_cdevpriv(void)
{
	struct file *fp;

	fp = curthread->td_fpop;
	if (fp == NULL)
		return;
	devfs_fpdrop(fp);
}
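
/*
 * Illustrative use of the cdevpriv interface from a driver's point of view.
 * This is only a sketch, not part of this file; the names "foo_state",
 * "foo_priv_dtor", "foo_open", "foo_ioctl" and the M_TEMP malloc type are
 * placeholders chosen for the example:
 *
 *	static void
 *	foo_priv_dtor(void *data)
 *	{
 *
 *		free(data, M_TEMP);
 *	}
 *
 *	static int
 *	foo_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
 *	{
 *		struct foo_state *st;
 *		int error;
 *
 *		st = malloc(sizeof(*st), M_TEMP, M_WAITOK | M_ZERO);
 *		error = devfs_set_cdevpriv(st, foo_priv_dtor);
 *		if (error != 0)
 *			free(st, M_TEMP);
 *		return (error);
 *	}
 *
 *	static int
 *	foo_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
 *	    struct thread *td)
 *	{
 *		struct foo_state *st;
 *		int error;
 *
 *		error = devfs_get_cdevpriv((void **)&st);
 *		if (error != 0)
 *			return (error);
 *		return (0);
 *	}
 *
 * The registered destructor is invoked by devfs when the per-descriptor
 * data is torn down; see devfs_destroy_cdevpriv() and devfs_clear_cdevpriv()
 * above.
 */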

static int
devfs_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct devfs_mount *dmp;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	struct devfs_dirent *dd, *de;
	int i, error;

	dmp = VFSTODEVFS(vp->v_mount);
	i = *buflen;
	dd = vp->v_data;
	error = 0;

	sx_xlock(&dmp->dm_lock);

	if (vp->v_type == VCHR) {
		i -= strlen(dd->de_cdp->cdp_c.si_name);
		if (i < 0) {
			error = ENOMEM;
			goto finished;
		}
		bcopy(dd->de_cdp->cdp_c.si_name, buf + i,
		    strlen(dd->de_cdp->cdp_c.si_name));
		de = dd->de_dir;
	} else if (vp->v_type == VDIR) {
		if (dd == dmp->dm_rootdir) {
			*dvp = vp;
			vhold(*dvp);
			goto finished;
		}
		i -= dd->de_dirent->d_namlen;
		if (i < 0) {
			error = ENOMEM;
			goto finished;
		}
		bcopy(dd->de_dirent->d_name, buf + i,
		    dd->de_dirent->d_namlen);
		de = dd;
	} else {
		error = ENOENT;
		goto finished;
	}
	*buflen = i;
	de = TAILQ_FIRST(&de->de_dlist);	/* "." */
	de = TAILQ_NEXT(de, de_list);		/* ".." */
	de = de->de_dir;
	mtx_lock(&devfs_de_interlock);
	*dvp = de->de_vnode;
	if (*dvp != NULL) {
		VI_LOCK(*dvp);
		mtx_unlock(&devfs_de_interlock);
		vholdl(*dvp);
		VI_UNLOCK(*dvp);
	} else {
		mtx_unlock(&devfs_de_interlock);
		error = ENOENT;
	}
finished:
	sx_xunlock(&dmp->dm_lock);
	return (error);
}

/*
 * Construct the fully qualified path name relative to the mountpoint.
 */
static char *
devfs_fqpn(char *buf, struct vnode *dvp, struct componentname *cnp)
{
	int i;
	struct devfs_dirent *de, *dd;
	struct devfs_mount *dmp;

	dmp = VFSTODEVFS(dvp->v_mount);
	dd = dvp->v_data;
	i = SPECNAMELEN;
	buf[i] = '\0';
	i -= cnp->cn_namelen;
	if (i < 0)
		return (NULL);
	bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen);
	de = dd;
	while (de != dmp->dm_rootdir) {
		i--;
		if (i < 0)
			return (NULL);
		buf[i] = '/';
		i -= de->de_dirent->d_namlen;
		if (i < 0)
			return (NULL);
		bcopy(de->de_dirent->d_name, buf + i,
		    de->de_dirent->d_namlen);
		de = TAILQ_FIRST(&de->de_dlist);	/* "." */
		de = TAILQ_NEXT(de, de_list);		/* ".." */
		de = de->de_dir;
	}
	return (buf + i);
}

static int
devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp,
    struct devfs_dirent *de)
{
	int not_found;

	not_found = 0;
	if (de->de_flags & DE_DOOMED)
		not_found = 1;
	if (DEVFS_DE_DROP(de)) {
		KASSERT(not_found == 1, ("DEVFS de dropped but not doomed"));
		devfs_dirent_free(de);
	}
	if (DEVFS_DMP_DROP(dmp)) {
		KASSERT(not_found == 1,
		    ("DEVFS mount struct freed before dirent"));
		not_found = 2;
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
	}
	if (not_found == 1 || (drop_dm_lock && not_found != 2))
		sx_unlock(&dmp->dm_lock);
	return (not_found);
}

static void
devfs_insmntque_dtr(struct vnode *vp, void *arg)
{
	struct devfs_dirent *de;

	de = (struct devfs_dirent *)arg;
	mtx_lock(&devfs_de_interlock);
	vp->v_data = NULL;
	de->de_vnode = NULL;
	mtx_unlock(&devfs_de_interlock);
	vgone(vp);
	vput(vp);
}

/*
 * devfs_allocv shall be entered with dmp->dm_lock held, and it drops
 * it on return.
 */
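/*
 * An illustrative caller sketch (for exposition only; "dmp", "de", "mp" and
 * "vp" stand for an already looked-up devfs mount, directory entry, mount
 * point and result vnode):
 *
 *	sx_xlock(&dmp->dm_lock);
 *	error = devfs_allocv(de, mp, &vp);
 *
 * dm_lock has been dropped at this point whether or not the call succeeded;
 * on success vp is returned locked and referenced.  devfs_lookupx(),
 * devfs_mknod() and devfs_symlink() below all follow this pattern.
 */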
int
devfs_allocv(struct devfs_dirent *de, struct mount *mp, struct vnode **vpp)
{
	int error;
	struct vnode *vp;
	struct cdev *dev;
	struct devfs_mount *dmp;

	dmp = VFSTODEVFS(mp);
	if (de->de_flags & DE_DOOMED) {
		sx_xunlock(&dmp->dm_lock);
		return (ENOENT);
	}
	DEVFS_DE_HOLD(de);
	DEVFS_DMP_HOLD(dmp);
	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		VI_LOCK(vp);
		mtx_unlock(&devfs_de_interlock);
		sx_xunlock(&dmp->dm_lock);
		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread);
		sx_xlock(&dmp->dm_lock);
		if (devfs_allocv_drop_refs(0, dmp, de)) {
			if (error == 0)
				vput(vp);
			return (ENOENT);
		} else if (error) {
			sx_xunlock(&dmp->dm_lock);
			return (error);
		}
		sx_xunlock(&dmp->dm_lock);
		*vpp = vp;
		return (0);
	}
	mtx_unlock(&devfs_de_interlock);
	if (de->de_dirent->d_type == DT_CHR) {
		if (!(de->de_cdp->cdp_flags & CDP_ACTIVE)) {
			devfs_allocv_drop_refs(1, dmp, de);
			return (ENOENT);
		}
		dev = &de->de_cdp->cdp_c;
	} else {
		dev = NULL;
	}
	error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp);
	if (error != 0) {
		devfs_allocv_drop_refs(1, dmp, de);
		printf("devfs_allocv: failed to allocate new vnode\n");
		return (error);
	}

	if (de->de_dirent->d_type == DT_CHR) {
		vp->v_type = VCHR;
		VI_LOCK(vp);
		dev_lock();
		dev_refl(dev);
		/* XXX: v_rdev should be protected by the vnode lock. */
		vp->v_rdev = dev;
		KASSERT(vp->v_usecount == 1,
		    ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount));
		dev->si_usecount += vp->v_usecount;
		dev_unlock();
		VI_UNLOCK(vp);
		vp->v_op = &devfs_specops;
	} else if (de->de_dirent->d_type == DT_DIR) {
		vp->v_type = VDIR;
	} else if (de->de_dirent->d_type == DT_LNK) {
		vp->v_type = VLNK;
	} else {
		vp->v_type = VBAD;
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
	mtx_lock(&devfs_de_interlock);
	vp->v_data = de;
	de->de_vnode = vp;
	mtx_unlock(&devfs_de_interlock);
	error = insmntque1(vp, mp, devfs_insmntque_dtr, de);
	if (error != 0) {
		(void) devfs_allocv_drop_refs(1, dmp, de);
		return (error);
	}
	if (devfs_allocv_drop_refs(0, dmp, de)) {
		vput(vp);
		return (ENOENT);
	}
#ifdef MAC
	mac_devfs_vnode_associate(mp, de, vp);
#endif
	sx_xunlock(&dmp->dm_lock);
	*vpp = vp;
	return (0);
}

static int
devfs_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *de;
	int error;

	de = vp->v_data;
	if (vp->v_type == VDIR)
		de = de->de_dir;

	error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid,
	    ap->a_accmode, ap->a_cred, NULL);
	if (error == 0)
		return (0);
	if (error != EACCES)
		return (error);
	/* We do, however, allow access to the controlling terminal. */
	if (!(ap->a_td->td_proc->p_flag & P_CONTROLT))
		return (error);
	if (ap->a_td->td_proc->p_session->s_ttydp == de->de_cdp)
		return (0);
	return (error);
}

/* ARGSUSED */
static int
devfs_close(struct vop_close_args *ap)
{
	struct vnode *vp = ap->a_vp, *oldvp;
	struct thread *td = ap->a_td;
	struct cdev *dev = vp->v_rdev;
	struct cdevsw *dsw;
	int vp_locked, error;

	/*
	 * Hack: a tty device that is a controlling terminal
	 * has a reference from the session structure.
	 * We cannot easily tell that a character device is
	 * a controlling terminal, unless it is the closing
	 * process' controlling terminal.  In that case,
	 * if the reference count is 2 (this last descriptor
	 * plus the session), release the reference from the session.
	 */
	oldvp = NULL;
	sx_xlock(&proctree_lock);
	if (td && vp == td->td_proc->p_session->s_ttyvp) {
		SESS_LOCK(td->td_proc->p_session);
		VI_LOCK(vp);
		if (count_dev(dev) == 2 && (vp->v_iflag & VI_DOOMED) == 0) {
			td->td_proc->p_session->s_ttyvp = NULL;
			td->td_proc->p_session->s_ttydp = NULL;
			oldvp = vp;
		}
		VI_UNLOCK(vp);
		SESS_UNLOCK(td->td_proc->p_session);
	}
	sx_xunlock(&proctree_lock);
	if (oldvp != NULL)
		vrele(oldvp);
	/*
	 * We do not want to really close the device if it
	 * is still in use unless we are trying to close it
	 * forcibly.  Since every use (buffer, vnode, swap, cmap)
	 * holds a reference to the vnode, and because we mark
	 * any other vnodes that alias this device, when the
	 * sum of the reference counts on all the aliased
	 * vnodes descends to one, we are on last close.
	 */
	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		/* Forced close. */
	} else if (dsw->d_flags & D_TRACKCLOSE) {
		/* Keep device updated on status. */
	} else if (count_dev(dev) > 1) {
		VI_UNLOCK(vp);
		dev_relthread(dev);
		return (0);
	}
	vholdl(vp);
	VI_UNLOCK(vp);
	vp_locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	KASSERT(dev->si_refcount > 0,
	    ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
	error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td);
	dev_relthread(dev);
	vn_lock(vp, vp_locked | LK_RETRY);
	vdrop(vp);
	return (error);
}

static int
devfs_close_f(struct file *fp, struct thread *td)
{
	int error;
	struct file *fpop;

	fpop = td->td_fpop;
	td->td_fpop = fp;
	error = vnops.fo_close(fp, td);
	td->td_fpop = fpop;
	return (error);
}

static int
devfs_fsync(struct vop_fsync_args *ap)
{
	int error;
	struct bufobj *bo;
	struct devfs_dirent *de;

	if (!vn_isdisk(ap->a_vp, &error)) {
		bo = &ap->a_vp->v_bufobj;
		de = ap->a_vp->v_data;
		if (error == ENXIO && bo->bo_dirty.bv_cnt > 0) {
			printf("Device %s went missing before all of the data "
			    "could be written to it; expect data loss.\n",
			    de->de_dirent->d_name);

			error = vop_stdfsync(ap);
			if (bo->bo_dirty.bv_cnt != 0 || error != 0)
				panic("devfs_fsync: vop_stdfsync failed.");
		}

		return (0);
	}

	return (vop_stdfsync(ap));
}

static int
devfs_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	int error = 0;
	struct devfs_dirent *de;
	struct cdev *dev;

	de = vp->v_data;
	KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp));
	if (vp->v_type == VDIR) {
		de = de->de_dir;
		KASSERT(de != NULL,
		    ("Null dir dirent in devfs_getattr vp=%p", vp));
	}
	vap->va_uid = de->de_uid;
	vap->va_gid = de->de_gid;
	vap->va_mode = de->de_mode;
	if (vp->v_type == VLNK)
		vap->va_size = strlen(de->de_symlink);
	else if (vp->v_type == VDIR)
		vap->va_size = vap->va_bytes = DEV_BSIZE;
	else
		vap->va_size = 0;
	if (vp->v_type != VDIR)
		vap->va_bytes = 0;
	vap->va_blocksize = DEV_BSIZE;
	vap->va_type = vp->v_type;

	/*
	 * If a timestamp was never set (it still lies within the first
	 * hour after the epoch), report the boot time instead.
	 */
#define fix(aa)							\
	do {							\
		if ((aa).tv_sec <= 3600) {			\
			(aa).tv_sec = boottime.tv_sec;		\
			(aa).tv_nsec = boottime.tv_usec * 1000;	\
		}						\
	} while (0)

	if (vp->v_type != VCHR) {
		fix(de->de_atime);
		vap->va_atime = de->de_atime;
		fix(de->de_mtime);
		vap->va_mtime = de->de_mtime;
		fix(de->de_ctime);
		vap->va_ctime = de->de_ctime;
	} else {
		dev = vp->v_rdev;
		fix(dev->si_atime);
		vap->va_atime = dev->si_atime;
		fix(dev->si_mtime);
		vap->va_mtime = dev->si_mtime;
		fix(dev->si_ctime);
		vap->va_ctime = dev->si_ctime;

		vap->va_rdev = cdev2priv(dev)->cdp_inode;
	}
	vap->va_gen = 0;
	vap->va_flags = 0;
	vap->va_filerev = 0;
	vap->va_nlink = de->de_links;
	vap->va_fileid = de->de_inode;

	return (error);
}

/* ARGSUSED */
static int
devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred,
    struct thread *td)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	struct vnode *vp;
	struct vnode *vpold;
	int error, i;
	const char *p;
	struct fiodgname_arg *fgn;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);

	if (com == FIODTYPE) {
		*(int *)data = dsw->d_flags & D_TYPEMASK;
		td->td_fpop = fpop;
		dev_relthread(dev);
		return (0);
	} else if (com == FIODGNAME) {
		fgn = data;
		p = devtoname(dev);
		i = strlen(p) + 1;
		if (i > fgn->len)
			error = EINVAL;
		else
			error = copyout(p, fgn->buf, i);
		td->td_fpop = fpop;
		dev_relthread(dev);
		return (error);
	}
	error = dsw->d_ioctl(dev, com, data, fp->f_flag, td);
	td->td_fpop = NULL;
	dev_relthread(dev);
	if (error == ENOIOCTL)
		error = ENOTTY;
	if (error == 0 && com == TIOCSCTTY) {
		vp = fp->f_vnode;

		/* Do nothing if reassigning same control tty. */
		sx_slock(&proctree_lock);
		if (td->td_proc->p_session->s_ttyvp == vp) {
			sx_sunlock(&proctree_lock);
			return (0);
		}

		vpold = td->td_proc->p_session->s_ttyvp;
		VREF(vp);
		SESS_LOCK(td->td_proc->p_session);
		td->td_proc->p_session->s_ttyvp = vp;
		td->td_proc->p_session->s_ttydp = cdev2priv(dev);
		SESS_UNLOCK(td->td_proc->p_session);

		sx_sunlock(&proctree_lock);

		/* Get rid of reference to old control tty. */
		if (vpold)
			vrele(vpold);
	}
	return (error);
}

/* ARGSUSED */
static int
devfs_kqfilter_f(struct file *fp, struct knote *kn)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	int error;
	struct file *fpop;
	struct thread *td;

	td = curthread;
	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	error = dsw->d_kqfilter(dev, kn);
	td->td_fpop = fpop;
	dev_relthread(dev);
	return (error);
}

static inline int
devfs_prison_check(struct devfs_dirent *de, struct thread *td)
{
	struct cdev_priv *cdp;
	struct ucred *dcr;
	int error;

	cdp = de->de_cdp;
	if (cdp == NULL)
		return (0);
	dcr = cdp->cdp_c.si_cred;
	if (dcr == NULL)
		return (0);

	error = prison_check(td->td_ucred, dcr);
	if (error == 0)
		return (0);
	/* We do, however, allow access to the controlling terminal. */
	if (!(td->td_proc->p_flag & P_CONTROLT))
		return (error);
	if (td->td_proc->p_session->s_ttydp == cdp)
		return (0);
	return (error);
}

static int
devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
{
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
	struct thread *td;
	struct devfs_dirent *de, *dd;
	struct devfs_dirent **dde;
	struct devfs_mount *dmp;
	struct cdev *cdev;
	int error, flags, nameiop;
	char specname[SPECNAMELEN + 1], *pname;

	cnp = ap->a_cnp;
	vpp = ap->a_vpp;
	dvp = ap->a_dvp;
	pname = cnp->cn_nameptr;
	td = cnp->cn_thread;
	flags = cnp->cn_flags;
	nameiop = cnp->cn_nameiop;
	dmp = VFSTODEVFS(dvp->v_mount);
	dd = dvp->v_data;
	*vpp = NULLVP;

	if ((flags & ISLASTCN) && nameiop == RENAME)
		return (EOPNOTSUPP);

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
		return (EIO);

	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
	if (error)
		return (error);

	if (cnp->cn_namelen == 1 && *pname == '.') {
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		*vpp = dvp;
		VREF(dvp);
		return (0);
	}

	if (flags & ISDOTDOT) {
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		VOP_UNLOCK(dvp, 0);
		de = TAILQ_FIRST(&dd->de_dlist);	/* "." */
		de = TAILQ_NEXT(de, de_list);		/* ".." */
		de = de->de_dir;
		error = devfs_allocv(de, dvp->v_mount, vpp);
		*dm_unlock = 0;
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		return (error);
	}

	DEVFS_DMP_HOLD(dmp);
	devfs_populate(dmp);
	if (DEVFS_DMP_DROP(dmp)) {
		*dm_unlock = 0;
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
		return (ENOENT);
	}
	dd = dvp->v_data;
	de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen);
	while (de == NULL) {	/* While(...) so we can use break */

		if (nameiop == DELETE)
			return (ENOENT);

		/*
		 * OK, we didn't have an entry for the name we were asked for,
		 * so we try to see if anybody can create it on demand.
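		 *
		 * Drivers arrange for that by registering a dev_clone event
		 * handler.  A hedged sketch of such a handler (the names
		 * "foo_clone", "foo" and foo_cdevsw are placeholders, not
		 * part of this file):
		 *
		 *	static void
		 *	foo_clone(void *arg, struct ucred *cred, char *name,
		 *	    int namelen, struct cdev **dev)
		 *	{
		 *
		 *		if (*dev != NULL || strcmp(name, "foo") != 0)
		 *			return;
		 *		*dev = make_dev(&foo_cdevsw, 0, UID_ROOT,
		 *		    GID_WHEEL, 0600, "foo");
		 *		dev_ref(*dev);
		 *	}
		 *
		 *	EVENTHANDLER_REGISTER(dev_clone, foo_clone, NULL, 1000);
		 *
		 * The EVENTHANDLER_INVOKE() below hands the handler the
		 * credentials and the path relative to the mountpoint; the
		 * dev_rel() further down suggests the handler is expected to
		 * leave a referenced cdev in *dev on success.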
		 */
		pname = devfs_fqpn(specname, dvp, cnp);
		if (pname == NULL)
			break;

		cdev = NULL;
		DEVFS_DMP_HOLD(dmp);
		sx_xunlock(&dmp->dm_lock);
		sx_slock(&clone_drain_lock);
		EVENTHANDLER_INVOKE(dev_clone,
		    td->td_ucred, pname, strlen(pname), &cdev);
		sx_sunlock(&clone_drain_lock);
		sx_xlock(&dmp->dm_lock);
		if (DEVFS_DMP_DROP(dmp)) {
			*dm_unlock = 0;
			sx_xunlock(&dmp->dm_lock);
			devfs_unmount_final(dmp);
			return (ENOENT);
		}
		if (cdev == NULL)
			break;

		DEVFS_DMP_HOLD(dmp);
		devfs_populate(dmp);
		if (DEVFS_DMP_DROP(dmp)) {
			*dm_unlock = 0;
			sx_xunlock(&dmp->dm_lock);
			devfs_unmount_final(dmp);
			return (ENOENT);
		}

		dev_lock();
		dde = &cdev2priv(cdev)->cdp_dirents[dmp->dm_idx];
		if (dde != NULL && *dde != NULL)
			de = *dde;
		dev_unlock();
		dev_rel(cdev);
		break;
	}

	if (de == NULL || de->de_flags & DE_WHITEOUT) {
		if ((nameiop == CREATE || nameiop == RENAME) &&
		    (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) {
			cnp->cn_flags |= SAVENAME;
			return (EJUSTRETURN);
		}
		return (ENOENT);
	}

	if (devfs_prison_check(de, td))
		return (ENOENT);

	if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) {
		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
		if (error)
			return (error);
		if (*vpp == dvp) {
			VREF(dvp);
			*vpp = dvp;
			return (0);
		}
	}
	error = devfs_allocv(de, dvp->v_mount, vpp);
	*dm_unlock = 0;
	return (error);
}

static int
devfs_lookup(struct vop_lookup_args *ap)
{
	int j;
	struct devfs_mount *dmp;
	int dm_unlock;

	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
	dm_unlock = 1;
	sx_xlock(&dmp->dm_lock);
	j = devfs_lookupx(ap, &dm_unlock);
	if (dm_unlock == 1)
		sx_xunlock(&dmp->dm_lock);
	return (j);
}

static int
devfs_mknod(struct vop_mknod_args *ap)
{
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
	struct devfs_dirent *dd, *de;
	struct devfs_mount *dmp;
	int error;

	/*
	 * The only type of node we should be creating here is a
	 * character device; for anything else return EOPNOTSUPP.
	 */
	if (ap->a_vap->va_type != VCHR)
		return (EOPNOTSUPP);
	dvp = ap->a_dvp;
	dmp = VFSTODEVFS(dvp->v_mount);

	cnp = ap->a_cnp;
	vpp = ap->a_vpp;
	dd = dvp->v_data;

	error = ENOENT;
	sx_xlock(&dmp->dm_lock);
	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (cnp->cn_namelen != de->de_dirent->d_namlen)
			continue;
		if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name,
		    de->de_dirent->d_namlen) != 0)
			continue;
		if (de->de_flags & DE_WHITEOUT)
			break;
		goto notfound;
	}
	if (de == NULL)
		goto notfound;
	de->de_flags &= ~DE_WHITEOUT;
	error = devfs_allocv(de, dvp->v_mount, vpp);
	return (error);
notfound:
	sx_xunlock(&dmp->dm_lock);
	return (error);
}

/* ARGSUSED */
static int
devfs_open(struct vop_open_args *ap)
{
	struct thread *td = ap->a_td;
	struct vnode *vp = ap->a_vp;
	struct cdev *dev = vp->v_rdev;
	struct file *fp = ap->a_fp;
	int error;
	struct cdevsw *dsw;
	struct file *fpop;

	if (vp->v_type == VBLK)
		return (ENXIO);

	if (dev == NULL)
		return (ENXIO);

	/* Make this field valid before any I/O in d_open. */
	if (dev->si_iosize_max == 0)
		dev->si_iosize_max = DFLTPHYS;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);

	/* XXX: Special casing of ttys for deadfs.  Probably redundant. */
	if (dsw->d_flags & D_TTY)
		vp->v_vflag |= VV_ISTTY;

	VOP_UNLOCK(vp, 0);

	fpop = td->td_fpop;
	td->td_fpop = fp;
	if (fp != NULL) {
		fp->f_data = dev;
		fp->f_vnode = vp;
	}
	if (dsw->d_fdopen != NULL)
		error = dsw->d_fdopen(dev, ap->a_mode, td, fp);
	else
		error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
	td->td_fpop = fpop;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	dev_relthread(dev);

	if (error)
		return (error);

#if 0	/* /dev/console */
	KASSERT(fp != NULL,
	    ("Could not vnode bypass device on NULL fp"));
#else
	if (fp == NULL)
		return (error);
#endif
	if (fp->f_ops == &badfileops)
		finit(fp, fp->f_flag, DTYPE_VNODE, dev, &devfs_ops_f);
	return (error);
}

static int
devfs_pathconf(struct vop_pathconf_args *ap)
{

	switch (ap->a_name) {
	case _PC_MAC_PRESENT:
#ifdef MAC
		/*
		 * If MAC is enabled, devfs automatically supports
		 * trivial non-persistent label storage.
		 */
		*ap->a_retval = 1;
#else
		*ap->a_retval = 0;
#endif
		return (0);
	default:
		return (vop_stdpathconf(ap));
	}
	/* NOTREACHED */
}

/* ARGSUSED */
static int
devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	int error;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (poll_no_poll(events));
	error = dsw->d_poll(dev, events, td);
	td->td_fpop = fpop;
	dev_relthread(dev);
	return (error);
}

/*
 * Print out the contents of a special device vnode.
 */
static int
devfs_print(struct vop_print_args *ap)
{

	printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev));
	return (0);
}

/* ARGSUSED */
static int
devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags,
    struct thread *td)
{
	struct cdev *dev;
	int ioflag, error, resid;
	struct cdevsw *dsw;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	resid = uio->uio_resid;
	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT);
	if (ioflag & O_DIRECT)
		ioflag |= IO_DIRECT;

	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	error = dsw->d_read(dev, uio, ioflag);
	if (uio->uio_resid != resid || (error == 0 && resid != 0))
		vfs_timestamp(&dev->si_atime);
	td->td_fpop = fpop;
	dev_relthread(dev);

	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	return (error);
}

static int
devfs_readdir(struct vop_readdir_args *ap)
{
	int error;
	struct uio *uio;
	struct dirent *dp;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp;
	off_t off;
	int *tmp_ncookies = NULL;

	if (ap->a_vp->v_type != VDIR)
		return (ENOTDIR);

	uio = ap->a_uio;
	if (uio->uio_offset < 0)
		return (EINVAL);

	/*
	 * XXX: This is a temporary hack to get around this filesystem not
	 * supporting cookies.  We store the location of the ncookies pointer
	 * in a temporary variable before calling vfs_subr.c:vfs_read_dirent()
	 * and set the number of cookies to 0.  We then set the pointer to
	 * NULL so that vfs_read_dirent doesn't try to call realloc() on
	 * ap->a_cookies.  Later in this function, we restore the
	 * ap->a_ncookies pointer to its original location before returning
	 * to the caller.
	 */
	if (ap->a_ncookies != NULL) {
		tmp_ncookies = ap->a_ncookies;
		*ap->a_ncookies = 0;
		ap->a_ncookies = NULL;
	}

	dmp = VFSTODEVFS(ap->a_vp->v_mount);
	sx_xlock(&dmp->dm_lock);
	DEVFS_DMP_HOLD(dmp);
	devfs_populate(dmp);
	if (DEVFS_DMP_DROP(dmp)) {
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
		if (tmp_ncookies != NULL)
			ap->a_ncookies = tmp_ncookies;
		return (EIO);
	}
	error = 0;
	de = ap->a_vp->v_data;
	off = 0;
	TAILQ_FOREACH(dd, &de->de_dlist, de_list) {
		KASSERT(dd->de_cdp != (void *)0xdeadc0de,
		    ("%s %d\n", __func__, __LINE__));
		if (dd->de_flags & DE_WHITEOUT)
			continue;
		if (devfs_prison_check(dd, uio->uio_td))
			continue;
		if (dd->de_dirent->d_type == DT_DIR)
			de = dd->de_dir;
		else
			de = dd;
		dp = dd->de_dirent;
		if (dp->d_reclen > uio->uio_resid)
			break;
		dp->d_fileno = de->de_inode;
		if (off >= uio->uio_offset) {
			error = vfs_read_dirent(ap, dp, off);
			if (error)
				break;
		}
		off += dp->d_reclen;
	}
	sx_xunlock(&dmp->dm_lock);
	uio->uio_offset = off;

	/*
	 * Restore ap->a_ncookies if it wasn't originally NULL.
	 */
	if (tmp_ncookies != NULL)
		ap->a_ncookies = tmp_ncookies;

	return (error);
}

static int
devfs_readlink(struct vop_readlink_args *ap)
{
	struct devfs_dirent *de;

	de = ap->a_vp->v_data;
	return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio));
}

static int
devfs_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *de;
	struct cdev *dev;

	mtx_lock(&devfs_de_interlock);
	de = vp->v_data;
	if (de != NULL) {
		de->de_vnode = NULL;
		vp->v_data = NULL;
	}
	mtx_unlock(&devfs_de_interlock);

	vnode_destroy_vobject(vp);

	VI_LOCK(vp);
	dev_lock();
	dev = vp->v_rdev;
	vp->v_rdev = NULL;

	if (dev == NULL) {
		dev_unlock();
		VI_UNLOCK(vp);
		return (0);
	}

	dev->si_usecount -= vp->v_usecount;
	dev_unlock();
	VI_UNLOCK(vp);
	dev_rel(dev);
	return (0);
}

static int
devfs_remove(struct vop_remove_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount);

	sx_xlock(&dmp->dm_lock);
	dd = ap->a_dvp->v_data;
	de = vp->v_data;
	if (de->de_cdp == NULL) {
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		devfs_delete(dmp, de, 1);
	} else {
		de->de_flags |= DE_WHITEOUT;
	}
	sx_xunlock(&dmp->dm_lock);
	return (0);
}

/*
 * Revoke is called on a tty when a terminal session ends.  The vnode
 * is orphaned by setting v_op to deadfs, so we need to let go of it
 * as well, so that a fresh vnode is created next time around.
 */
static int
devfs_revoke(struct vop_revoke_args *ap)
{
	struct vnode *vp = ap->a_vp, *vp2;
	struct cdev *dev;
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	int i;

	KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));

	dev = vp->v_rdev;
	cdp = cdev2priv(dev);

	dev_lock();
	cdp->cdp_inuse++;
	dev_unlock();

	vhold(vp);
	vgone(vp);
	vdrop(vp);

	VOP_UNLOCK(vp, 0);
loop:
	for (;;) {
		mtx_lock(&devfs_de_interlock);
		dev_lock();
		vp2 = NULL;
		for (i = 0; i <= cdp->cdp_maxdirent; i++) {
			de = cdp->cdp_dirents[i];
			if (de == NULL)
				continue;

			vp2 = de->de_vnode;
			if (vp2 != NULL) {
				dev_unlock();
				VI_LOCK(vp2);
				mtx_unlock(&devfs_de_interlock);
				if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
				    curthread))
					goto loop;
				vhold(vp2);
				vgone(vp2);
				vdrop(vp2);
				vput(vp2);
				break;
			}
		}
		if (vp2 != NULL) {
			continue;
		}
		dev_unlock();
		mtx_unlock(&devfs_de_interlock);
		break;
	}
	dev_lock();
	cdp->cdp_inuse--;
	if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) {
		TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
		dev_unlock();
		dev_rel(&cdp->cdp_c);
	} else
		dev_unlock();

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	return (0);
}

static int
devfs_rioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp;
	struct devfs_mount *dmp;
	int error;

	vp = ap->a_vp;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	if (vp->v_iflag & VI_DOOMED) {
		VOP_UNLOCK(vp, 0);
		return (EBADF);
	}
	dmp = VFSTODEVFS(vp->v_mount);
	sx_xlock(&dmp->dm_lock);
	VOP_UNLOCK(vp, 0);
	DEVFS_DMP_HOLD(dmp);
	devfs_populate(dmp);
	if (DEVFS_DMP_DROP(dmp)) {
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
		return (ENOENT);
	}
	error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td);
	sx_xunlock(&dmp->dm_lock);
	return (error);
}

static int
devfs_rread(struct vop_read_args *ap)
{

	if (ap->a_vp->v_type != VDIR)
		return (EINVAL);
	return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL));
}

static int
devfs_setattr(struct vop_setattr_args *ap)
{
	struct devfs_dirent *de;
	struct vattr *vap;
	struct vnode *vp;
	struct thread *td;
	int c, error;
	uid_t uid;
	gid_t gid;

	vap = ap->a_vap;
	vp = ap->a_vp;
	td = curthread;
	if ((vap->va_type != VNON) ||
	    (vap->va_nlink != VNOVAL) ||
	    (vap->va_fsid != VNOVAL) ||
	    (vap->va_fileid != VNOVAL) ||
	    (vap->va_blocksize != VNOVAL) ||
	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
	    (vap->va_rdev != VNOVAL) ||
	    ((int)vap->va_bytes != VNOVAL) ||
	    (vap->va_gen != VNOVAL)) {
		return (EINVAL);
	}

	de = vp->v_data;
	if (vp->v_type == VDIR)
		de = de->de_dir;

	error = c = 0;
	if (vap->va_uid == (uid_t)VNOVAL)
		uid = de->de_uid;
	else
		uid = vap->va_uid;
	if (vap->va_gid == (gid_t)VNOVAL)
		gid = de->de_gid;
	else
		gid = vap->va_gid;
	if (uid != de->de_uid || gid != de->de_gid) {
		if ((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid ||
		    (gid != de->de_gid && !groupmember(gid, ap->a_cred))) {
			error = priv_check(td, PRIV_VFS_CHOWN);
			if (error)
				return (error);
		}
		de->de_uid = uid;
		de->de_gid = gid;
		c = 1;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		if (ap->a_cred->cr_uid != de->de_uid) {
			error = priv_check(td, PRIV_VFS_ADMIN);
			if (error)
				return (error);
		}
		de->de_mode = vap->va_mode;
		c = 1;
	}

	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
		/*
		 * See the comment in ufs_vnops::ufs_setattr().
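		 * In short (a paraphrase of the check below, not new policy):
		 * the owner, or a caller passing the VADMIN check, may always
		 * set the timestamps; other callers may do so only in the
		 * utimes(path, NULL) case, signalled by VA_UTIMES_NULL, where
		 * write permission on the node suffices.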
		 */
		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)) &&
		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
		    (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, td))))
			return (error);
		if (vap->va_atime.tv_sec != VNOVAL) {
			if (vp->v_type == VCHR)
				vp->v_rdev->si_atime = vap->va_atime;
			else
				de->de_atime = vap->va_atime;
		}
		if (vap->va_mtime.tv_sec != VNOVAL) {
			if (vp->v_type == VCHR)
				vp->v_rdev->si_mtime = vap->va_mtime;
			else
				de->de_mtime = vap->va_mtime;
		}
		c = 1;
	}

	if (c) {
		if (vp->v_type == VCHR)
			vfs_timestamp(&vp->v_rdev->si_ctime);
		else
			vfs_timestamp(&de->de_mtime);
	}
	return (0);
}

#ifdef MAC
static int
devfs_setlabel(struct vop_setlabel_args *ap)
{
	struct vnode *vp;
	struct devfs_dirent *de;

	vp = ap->a_vp;
	de = vp->v_data;

	mac_vnode_relabel(ap->a_cred, vp, ap->a_label);
	mac_devfs_update(vp->v_mount, de, vp);

	return (0);
}
#endif

static int
devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred,
    struct thread *td)
{

	return (vnops.fo_stat(fp, sb, cred, td));
}

static int
devfs_symlink(struct vop_symlink_args *ap)
{
	int i, error;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp;

	error = priv_check(curthread, PRIV_DEVFS_SYMLINK);
	if (error)
		return (error);
	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
	dd = ap->a_dvp->v_data;
	de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen);
	de->de_uid = 0;
	de->de_gid = 0;
	de->de_mode = 0755;
	de->de_inode = alloc_unr(devfs_inos);
	de->de_dirent->d_type = DT_LNK;
	i = strlen(ap->a_target) + 1;
	de->de_symlink = malloc(i, M_DEVFS, M_WAITOK);
	bcopy(ap->a_target, de->de_symlink, i);
	sx_xlock(&dmp->dm_lock);
#ifdef MAC
	mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de);
#endif
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	return (devfs_allocv(de, ap->a_dvp->v_mount, ap->a_vpp));
}

static int
devfs_truncate_f(struct file *fp, off_t length, struct ucred *cred,
    struct thread *td)
{

	return (vnops.fo_truncate(fp, length, cred, td));
}

/* ARGSUSED */
static int
devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags,
    struct thread *td)
{
	struct cdev *dev;
	int error, ioflag, resid;
	struct cdevsw *dsw;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	KASSERT(uio->uio_td == td,
	    ("uio_td %p is not td %p", uio->uio_td, td));
	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC);
	if (ioflag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	resid = uio->uio_resid;

	error = dsw->d_write(dev, uio, ioflag);
	if (uio->uio_resid != resid || (error == 0 && resid != 0)) {
		vfs_timestamp(&dev->si_ctime);
		dev->si_mtime = dev->si_ctime;
	}
	td->td_fpop = fpop;
	dev_relthread(dev);

	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	return (error);
}

dev_t
dev2udev(struct cdev *x)
{
	if (x == NULL)
		return (NODEV);
	return (cdev2priv(x)->cdp_inode);
}
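
/*
 * File operations installed by devfs_open() via finit() so that I/O on an
 * open character-device descriptor bypasses the vnode layer and reaches the
 * driver's cdevsw directly.  Each handler records the struct file in
 * td_fpop for the duration of the call (see devfs_fp_check()) so that
 * devfs_get_cdevpriv() and friends can find the per-descriptor state;
 * stat, truncate and close still delegate to the generic vnode fileops.
 */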
static struct fileops devfs_ops_f = {
	.fo_read =	devfs_read_f,
	.fo_write =	devfs_write_f,
	.fo_truncate =	devfs_truncate_f,
	.fo_ioctl =	devfs_ioctl_f,
	.fo_poll =	devfs_poll_f,
	.fo_kqfilter =	devfs_kqfilter_f,
	.fo_stat =	devfs_stat_f,
	.fo_close =	devfs_close_f,
	.fo_flags =	DFLAG_PASSABLE | DFLAG_SEEKABLE
};

static struct vop_vector devfs_vnodeops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		devfs_access,
	.vop_getattr =		devfs_getattr,
	.vop_ioctl =		devfs_rioctl,
	.vop_lookup =		devfs_lookup,
	.vop_mknod =		devfs_mknod,
	.vop_pathconf =		devfs_pathconf,
	.vop_read =		devfs_rread,
	.vop_readdir =		devfs_readdir,
	.vop_readlink =		devfs_readlink,
	.vop_reclaim =		devfs_reclaim,
	.vop_remove =		devfs_remove,
	.vop_revoke =		devfs_revoke,
	.vop_setattr =		devfs_setattr,
#ifdef MAC
	.vop_setlabel =		devfs_setlabel,
#endif
	.vop_symlink =		devfs_symlink,
	.vop_vptocnp =		devfs_vptocnp,
};

static struct vop_vector devfs_specops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		devfs_access,
	.vop_bmap =		VOP_PANIC,
	.vop_close =		devfs_close,
	.vop_create =		VOP_PANIC,
	.vop_fsync =		devfs_fsync,
	.vop_getattr =		devfs_getattr,
	.vop_link =		VOP_PANIC,
	.vop_mkdir =		VOP_PANIC,
	.vop_mknod =		VOP_PANIC,
	.vop_open =		devfs_open,
	.vop_pathconf =		devfs_pathconf,
	.vop_print =		devfs_print,
	.vop_read =		VOP_PANIC,
	.vop_readdir =		VOP_PANIC,
	.vop_readlink =		VOP_PANIC,
	.vop_reallocblks =	VOP_PANIC,
	.vop_reclaim =		devfs_reclaim,
	.vop_remove =		devfs_remove,
	.vop_rename =		VOP_PANIC,
	.vop_revoke =		devfs_revoke,
	.vop_rmdir =		VOP_PANIC,
	.vop_setattr =		devfs_setattr,
#ifdef MAC
	.vop_setlabel =		devfs_setlabel,
#endif
	.vop_strategy =		VOP_PANIC,
	.vop_symlink =		VOP_PANIC,
	.vop_vptocnp =		devfs_vptocnp,
	.vop_write =		VOP_PANIC,
};

/*
 * Our calling convention to the device drivers used to be that we passed
 * vnode.h IO_* flags to read()/write(), but we are moving to the fcntl.h O_*
 * flags instead, since those are what open(), close() and ioctl() take and
 * we do not really want vnode.h in device drivers.
 * We solved the source compatibility issue by redefining some vnode flags
 * to be the same as the fcntl ones and by sending down the bitwise OR of
 * the respective fcntl/vnode flags.  These CTASSERTs make sure nobody
 * pulls the rug out from under this.
 */
CTASSERT(O_NONBLOCK == IO_NDELAY);
CTASSERT(O_FSYNC == IO_SYNC);