/*-
 * Copyright (c) 2000-2004
 *	Poul-Henning Kamp.  All rights reserved.
 * Copyright (c) 1989, 1992-1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kernfs_vnops.c	8.15 (Berkeley) 5/21/95
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43
 *
 * $FreeBSD$
 */

/*
 * TODO:
 *	remove empty directories
 *	mkdir: want it ?
41 */ 42 43 #include "opt_mac.h" 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/conf.h> 48 #include <sys/dirent.h> 49 #include <sys/fcntl.h> 50 #include <sys/file.h> 51 #include <sys/filedesc.h> 52 #include <sys/filio.h> 53 #include <sys/kernel.h> 54 #include <sys/lock.h> 55 #include <sys/malloc.h> 56 #include <sys/mount.h> 57 #include <sys/namei.h> 58 #include <sys/priv.h> 59 #include <sys/proc.h> 60 #include <sys/stat.h> 61 #include <sys/sx.h> 62 #include <sys/time.h> 63 #include <sys/ttycom.h> 64 #include <sys/unistd.h> 65 #include <sys/vnode.h> 66 67 static struct vop_vector devfs_vnodeops; 68 static struct vop_vector devfs_specops; 69 static struct fileops devfs_ops_f; 70 71 #include <fs/devfs/devfs.h> 72 #include <fs/devfs/devfs_int.h> 73 74 #include <security/mac/mac_framework.h> 75 76 static MALLOC_DEFINE(M_CDEVPDATA, "DEVFSP", "Metainfo for cdev-fp data"); 77 78 struct mtx devfs_de_interlock; 79 MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF); 80 struct sx clone_drain_lock; 81 SX_SYSINIT(clone_drain_lock, &clone_drain_lock, "clone events drain lock"); 82 struct mtx cdevpriv_mtx; 83 MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF); 84 85 static int 86 devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp) 87 { 88 89 *dswp = devvn_refthread(fp->f_vnode, devp); 90 if (*devp != fp->f_data) { 91 if (*dswp != NULL) 92 dev_relthread(*devp); 93 return (ENXIO); 94 } 95 KASSERT((*devp)->si_refcount > 0, 96 ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp))); 97 if (*dswp == NULL) 98 return (ENXIO); 99 curthread->td_fpop = fp; 100 return (0); 101 } 102 103 int 104 devfs_get_cdevpriv(void **datap) 105 { 106 struct file *fp; 107 struct cdev_privdata *p; 108 int error; 109 110 fp = curthread->td_fpop; 111 if (fp == NULL) 112 return (EBADF); 113 mtx_lock(&cdevpriv_mtx); 114 p = fp->f_cdevpriv; 115 mtx_unlock(&cdevpriv_mtx); 116 if (p != NULL) { 117 error = 0; 118 
*datap = p->cdpd_data; 119 } else 120 error = ENOENT; 121 return (error); 122 } 123 124 int 125 devfs_set_cdevpriv(void *priv, cdevpriv_dtr_t priv_dtr) 126 { 127 struct file *fp; 128 struct cdev_priv *cdp; 129 struct cdev_privdata *p; 130 int error; 131 132 fp = curthread->td_fpop; 133 if (fp == NULL) 134 return (ENOENT); 135 cdp = ((struct cdev *)fp->f_data)->si_priv; 136 p = malloc(sizeof(struct cdev_privdata), M_CDEVPDATA, M_WAITOK); 137 p->cdpd_data = priv; 138 p->cdpd_dtr = priv_dtr; 139 p->cdpd_fp = fp; 140 mtx_lock(&cdevpriv_mtx); 141 if (fp->f_cdevpriv == NULL) { 142 LIST_INSERT_HEAD(&cdp->cdp_fdpriv, p, cdpd_list); 143 fp->f_cdevpriv = p; 144 mtx_unlock(&cdevpriv_mtx); 145 error = 0; 146 } else { 147 mtx_unlock(&cdevpriv_mtx); 148 free(p, M_CDEVPDATA); 149 error = EBUSY; 150 } 151 return (error); 152 } 153 154 void 155 devfs_destroy_cdevpriv(struct cdev_privdata *p) 156 { 157 158 mtx_assert(&cdevpriv_mtx, MA_OWNED); 159 p->cdpd_fp->f_cdevpriv = NULL; 160 LIST_REMOVE(p, cdpd_list); 161 mtx_unlock(&cdevpriv_mtx); 162 (p->cdpd_dtr)(p->cdpd_data); 163 free(p, M_CDEVPDATA); 164 } 165 166 void 167 devfs_fpdrop(struct file *fp) 168 { 169 struct cdev_privdata *p; 170 171 mtx_lock(&cdevpriv_mtx); 172 if ((p = fp->f_cdevpriv) == NULL) { 173 mtx_unlock(&cdevpriv_mtx); 174 return; 175 } 176 devfs_destroy_cdevpriv(p); 177 } 178 179 void 180 devfs_clear_cdevpriv(void) 181 { 182 struct file *fp; 183 184 fp = curthread->td_fpop; 185 if (fp == NULL) 186 return; 187 devfs_fpdrop(fp); 188 } 189 190 /* 191 * Construct the fully qualified path name relative to the mountpoint 192 */ 193 static char * 194 devfs_fqpn(char *buf, struct vnode *dvp, struct componentname *cnp) 195 { 196 int i; 197 struct devfs_dirent *de, *dd; 198 struct devfs_mount *dmp; 199 200 dmp = VFSTODEVFS(dvp->v_mount); 201 dd = dvp->v_data; 202 i = SPECNAMELEN; 203 buf[i] = '\0'; 204 i -= cnp->cn_namelen; 205 if (i < 0) 206 return (NULL); 207 bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen); 208 de = dd; 209 
while (de != dmp->dm_rootdir) { 210 i--; 211 if (i < 0) 212 return (NULL); 213 buf[i] = '/'; 214 i -= de->de_dirent->d_namlen; 215 if (i < 0) 216 return (NULL); 217 bcopy(de->de_dirent->d_name, buf + i, 218 de->de_dirent->d_namlen); 219 de = TAILQ_FIRST(&de->de_dlist); /* "." */ 220 de = TAILQ_NEXT(de, de_list); /* ".." */ 221 de = de->de_dir; 222 } 223 return (buf + i); 224 } 225 226 static int 227 devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp, 228 struct devfs_dirent *de) 229 { 230 int not_found; 231 232 not_found = 0; 233 if (de->de_flags & DE_DOOMED) 234 not_found = 1; 235 if (DEVFS_DE_DROP(de)) { 236 KASSERT(not_found == 1, ("DEVFS de dropped but not doomed")); 237 devfs_dirent_free(de); 238 } 239 if (DEVFS_DMP_DROP(dmp)) { 240 KASSERT(not_found == 1, 241 ("DEVFS mount struct freed before dirent")); 242 not_found = 2; 243 sx_xunlock(&dmp->dm_lock); 244 devfs_unmount_final(dmp); 245 } 246 if (not_found == 1 || (drop_dm_lock && not_found != 2)) 247 sx_unlock(&dmp->dm_lock); 248 return (not_found); 249 } 250 251 static void 252 devfs_insmntque_dtr(struct vnode *vp, void *arg) 253 { 254 struct devfs_dirent *de; 255 256 de = (struct devfs_dirent *)arg; 257 mtx_lock(&devfs_de_interlock); 258 vp->v_data = NULL; 259 de->de_vnode = NULL; 260 mtx_unlock(&devfs_de_interlock); 261 vgone(vp); 262 vput(vp); 263 } 264 265 /* 266 * devfs_allocv shall be entered with dmp->dm_lock held, and it drops 267 * it on return. 
268 */ 269 int 270 devfs_allocv(struct devfs_dirent *de, struct mount *mp, struct vnode **vpp, struct thread *td) 271 { 272 int error; 273 struct vnode *vp; 274 struct cdev *dev; 275 struct devfs_mount *dmp; 276 277 KASSERT(td == curthread, ("devfs_allocv: td != curthread")); 278 dmp = VFSTODEVFS(mp); 279 if (de->de_flags & DE_DOOMED) { 280 sx_xunlock(&dmp->dm_lock); 281 return (ENOENT); 282 } 283 DEVFS_DE_HOLD(de); 284 DEVFS_DMP_HOLD(dmp); 285 mtx_lock(&devfs_de_interlock); 286 vp = de->de_vnode; 287 if (vp != NULL) { 288 VI_LOCK(vp); 289 mtx_unlock(&devfs_de_interlock); 290 sx_xunlock(&dmp->dm_lock); 291 error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td); 292 sx_xlock(&dmp->dm_lock); 293 if (devfs_allocv_drop_refs(0, dmp, de)) { 294 if (error == 0) 295 vput(vp); 296 return (ENOENT); 297 } 298 else if (error) { 299 sx_xunlock(&dmp->dm_lock); 300 return (error); 301 } 302 sx_xunlock(&dmp->dm_lock); 303 *vpp = vp; 304 return (0); 305 } 306 mtx_unlock(&devfs_de_interlock); 307 if (de->de_dirent->d_type == DT_CHR) { 308 if (!(de->de_cdp->cdp_flags & CDP_ACTIVE)) { 309 devfs_allocv_drop_refs(1, dmp, de); 310 return (ENOENT); 311 } 312 dev = &de->de_cdp->cdp_c; 313 } else { 314 dev = NULL; 315 } 316 error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp); 317 if (error != 0) { 318 devfs_allocv_drop_refs(1, dmp, de); 319 printf("devfs_allocv: failed to allocate new vnode\n"); 320 return (error); 321 } 322 323 if (de->de_dirent->d_type == DT_CHR) { 324 vp->v_type = VCHR; 325 VI_LOCK(vp); 326 dev_lock(); 327 dev_refl(dev); 328 /* XXX: v_rdev should be protect by vnode lock */ 329 vp->v_rdev = dev; 330 KASSERT(vp->v_usecount == 1, 331 ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount)); 332 dev->si_usecount += vp->v_usecount; 333 dev_unlock(); 334 VI_UNLOCK(vp); 335 vp->v_op = &devfs_specops; 336 } else if (de->de_dirent->d_type == DT_DIR) { 337 vp->v_type = VDIR; 338 } else if (de->de_dirent->d_type == DT_LNK) { 339 vp->v_type = VLNK; 340 } else { 341 vp->v_type = VBAD; 
342 } 343 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 344 mtx_lock(&devfs_de_interlock); 345 vp->v_data = de; 346 de->de_vnode = vp; 347 mtx_unlock(&devfs_de_interlock); 348 error = insmntque1(vp, mp, devfs_insmntque_dtr, de); 349 if (error != 0) { 350 (void) devfs_allocv_drop_refs(1, dmp, de); 351 return (error); 352 } 353 if (devfs_allocv_drop_refs(0, dmp, de)) { 354 vput(vp); 355 return (ENOENT); 356 } 357 #ifdef MAC 358 mac_devfs_vnode_associate(mp, de, vp); 359 #endif 360 sx_xunlock(&dmp->dm_lock); 361 *vpp = vp; 362 return (0); 363 } 364 365 static int 366 devfs_access(struct vop_access_args *ap) 367 { 368 struct vnode *vp = ap->a_vp; 369 struct devfs_dirent *de; 370 int error; 371 372 de = vp->v_data; 373 if (vp->v_type == VDIR) 374 de = de->de_dir; 375 376 error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid, 377 ap->a_mode, ap->a_cred, NULL); 378 if (!error) 379 return (error); 380 if (error != EACCES) 381 return (error); 382 /* We do, however, allow access to the controlling terminal */ 383 if (!(ap->a_td->td_proc->p_flag & P_CONTROLT)) 384 return (error); 385 if (ap->a_td->td_proc->p_session->s_ttyvp == de->de_vnode) 386 return (0); 387 return (error); 388 } 389 390 /* ARGSUSED */ 391 static int 392 devfs_advlock(struct vop_advlock_args *ap) 393 { 394 395 return (ap->a_flags & F_FLOCK ? EOPNOTSUPP : EINVAL); 396 } 397 398 /* ARGSUSED */ 399 static int 400 devfs_close(struct vop_close_args *ap) 401 { 402 struct vnode *vp = ap->a_vp, *oldvp; 403 struct thread *td = ap->a_td; 404 struct cdev *dev = vp->v_rdev; 405 struct cdevsw *dsw; 406 int vp_locked, error; 407 408 /* 409 * Hack: a tty device that is a controlling terminal 410 * has a reference from the session structure. 411 * We cannot easily tell that a character device is 412 * a controlling terminal, unless it is the closing 413 * process' controlling terminal. In that case, 414 * if the reference count is 2 (this last descriptor 415 * plus the session), release the reference from the session. 
416 */ 417 oldvp = NULL; 418 sx_xlock(&proctree_lock); 419 if (td && vp == td->td_proc->p_session->s_ttyvp) { 420 SESS_LOCK(td->td_proc->p_session); 421 VI_LOCK(vp); 422 if (count_dev(dev) == 2 && (vp->v_iflag & VI_DOOMED) == 0) { 423 td->td_proc->p_session->s_ttyvp = NULL; 424 oldvp = vp; 425 } 426 VI_UNLOCK(vp); 427 SESS_UNLOCK(td->td_proc->p_session); 428 } 429 sx_xunlock(&proctree_lock); 430 if (oldvp != NULL) 431 vrele(oldvp); 432 /* 433 * We do not want to really close the device if it 434 * is still in use unless we are trying to close it 435 * forcibly. Since every use (buffer, vnode, swap, cmap) 436 * holds a reference to the vnode, and because we mark 437 * any other vnodes that alias this device, when the 438 * sum of the reference counts on all the aliased 439 * vnodes descends to one, we are on last close. 440 */ 441 dsw = dev_refthread(dev); 442 if (dsw == NULL) 443 return (ENXIO); 444 VI_LOCK(vp); 445 if (vp->v_iflag & VI_DOOMED) { 446 /* Forced close. */ 447 } else if (dsw->d_flags & D_TRACKCLOSE) { 448 /* Keep device updated on status. 
*/ 449 } else if (count_dev(dev) > 1) { 450 VI_UNLOCK(vp); 451 dev_relthread(dev); 452 return (0); 453 } 454 vholdl(vp); 455 VI_UNLOCK(vp); 456 vp_locked = VOP_ISLOCKED(vp); 457 VOP_UNLOCK(vp, 0); 458 KASSERT(dev->si_refcount > 0, 459 ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev))); 460 error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td); 461 dev_relthread(dev); 462 vn_lock(vp, vp_locked | LK_RETRY); 463 vdrop(vp); 464 return (error); 465 } 466 467 static int 468 devfs_close_f(struct file *fp, struct thread *td) 469 { 470 int error; 471 472 curthread->td_fpop = fp; 473 error = vnops.fo_close(fp, td); 474 curthread->td_fpop = NULL; 475 return (error); 476 } 477 478 /* ARGSUSED */ 479 static int 480 devfs_fsync(struct vop_fsync_args *ap) 481 { 482 if (!vn_isdisk(ap->a_vp, NULL)) 483 return (0); 484 485 return (vop_stdfsync(ap)); 486 } 487 488 static int 489 devfs_getattr(struct vop_getattr_args *ap) 490 { 491 struct vnode *vp = ap->a_vp; 492 struct vattr *vap = ap->a_vap; 493 int error = 0; 494 struct devfs_dirent *de; 495 struct cdev *dev; 496 497 de = vp->v_data; 498 KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp)); 499 if (vp->v_type == VDIR) { 500 de = de->de_dir; 501 KASSERT(de != NULL, 502 ("Null dir dirent in devfs_getattr vp=%p", vp)); 503 } 504 bzero((caddr_t) vap, sizeof(*vap)); 505 vattr_null(vap); 506 vap->va_uid = de->de_uid; 507 vap->va_gid = de->de_gid; 508 vap->va_mode = de->de_mode; 509 if (vp->v_type == VLNK) 510 vap->va_size = strlen(de->de_symlink); 511 else if (vp->v_type == VDIR) 512 vap->va_size = vap->va_bytes = DEV_BSIZE; 513 else 514 vap->va_size = 0; 515 if (vp->v_type != VDIR) 516 vap->va_bytes = 0; 517 vap->va_blocksize = DEV_BSIZE; 518 vap->va_type = vp->v_type; 519 520 #define fix(aa) \ 521 do { \ 522 if ((aa).tv_sec <= 3600) { \ 523 (aa).tv_sec = boottime.tv_sec; \ 524 (aa).tv_nsec = boottime.tv_usec * 1000; \ 525 } \ 526 } while (0) 527 528 if (vp->v_type != VCHR) { 529 fix(de->de_atime); 530 
vap->va_atime = de->de_atime; 531 fix(de->de_mtime); 532 vap->va_mtime = de->de_mtime; 533 fix(de->de_ctime); 534 vap->va_ctime = de->de_ctime; 535 } else { 536 dev = vp->v_rdev; 537 fix(dev->si_atime); 538 vap->va_atime = dev->si_atime; 539 fix(dev->si_mtime); 540 vap->va_mtime = dev->si_mtime; 541 fix(dev->si_ctime); 542 vap->va_ctime = dev->si_ctime; 543 544 vap->va_rdev = dev->si_priv->cdp_inode; 545 } 546 vap->va_gen = 0; 547 vap->va_flags = 0; 548 vap->va_nlink = de->de_links; 549 vap->va_fileid = de->de_inode; 550 551 return (error); 552 } 553 554 /* ARGSUSED */ 555 static int 556 devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struct thread *td) 557 { 558 struct cdev *dev; 559 struct cdevsw *dsw; 560 struct vnode *vp; 561 struct vnode *vpold; 562 int error, i; 563 const char *p; 564 struct fiodgname_arg *fgn; 565 566 error = devfs_fp_check(fp, &dev, &dsw); 567 if (error) 568 return (error); 569 570 if (com == FIODTYPE) { 571 *(int *)data = dsw->d_flags & D_TYPEMASK; 572 td->td_fpop = NULL; 573 dev_relthread(dev); 574 return (0); 575 } else if (com == FIODGNAME) { 576 fgn = data; 577 p = devtoname(dev); 578 i = strlen(p) + 1; 579 if (i > fgn->len) 580 error = EINVAL; 581 else 582 error = copyout(p, fgn->buf, i); 583 td->td_fpop = NULL; 584 dev_relthread(dev); 585 return (error); 586 } 587 error = dsw->d_ioctl(dev, com, data, fp->f_flag, td); 588 td->td_fpop = NULL; 589 dev_relthread(dev); 590 if (error == ENOIOCTL) 591 error = ENOTTY; 592 if (error == 0 && com == TIOCSCTTY) { 593 vp = fp->f_vnode; 594 595 /* Do nothing if reassigning same control tty */ 596 sx_slock(&proctree_lock); 597 if (td->td_proc->p_session->s_ttyvp == vp) { 598 sx_sunlock(&proctree_lock); 599 return (0); 600 } 601 602 mtx_lock(&Giant); /* XXX TTY */ 603 604 vpold = td->td_proc->p_session->s_ttyvp; 605 VREF(vp); 606 SESS_LOCK(td->td_proc->p_session); 607 td->td_proc->p_session->s_ttyvp = vp; 608 SESS_UNLOCK(td->td_proc->p_session); 609 610 
sx_sunlock(&proctree_lock); 611 612 /* Get rid of reference to old control tty */ 613 if (vpold) 614 vrele(vpold); 615 mtx_unlock(&Giant); /* XXX TTY */ 616 } 617 return (error); 618 } 619 620 /* ARGSUSED */ 621 static int 622 devfs_kqfilter_f(struct file *fp, struct knote *kn) 623 { 624 struct cdev *dev; 625 struct cdevsw *dsw; 626 int error; 627 628 error = devfs_fp_check(fp, &dev, &dsw); 629 if (error) 630 return (error); 631 error = dsw->d_kqfilter(dev, kn); 632 curthread->td_fpop = NULL; 633 dev_relthread(dev); 634 return (error); 635 } 636 637 static int 638 devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock) 639 { 640 struct componentname *cnp; 641 struct vnode *dvp, **vpp; 642 struct thread *td; 643 struct devfs_dirent *de, *dd; 644 struct devfs_dirent **dde; 645 struct devfs_mount *dmp; 646 struct cdev *cdev; 647 int error, flags, nameiop; 648 char specname[SPECNAMELEN + 1], *pname; 649 650 cnp = ap->a_cnp; 651 vpp = ap->a_vpp; 652 dvp = ap->a_dvp; 653 pname = cnp->cn_nameptr; 654 td = cnp->cn_thread; 655 flags = cnp->cn_flags; 656 nameiop = cnp->cn_nameiop; 657 dmp = VFSTODEVFS(dvp->v_mount); 658 dd = dvp->v_data; 659 *vpp = NULLVP; 660 661 if ((flags & ISLASTCN) && nameiop == RENAME) 662 return (EOPNOTSUPP); 663 664 if (dvp->v_type != VDIR) 665 return (ENOTDIR); 666 667 if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT)) 668 return (EIO); 669 670 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td); 671 if (error) 672 return (error); 673 674 if (cnp->cn_namelen == 1 && *pname == '.') { 675 if ((flags & ISLASTCN) && nameiop != LOOKUP) 676 return (EINVAL); 677 *vpp = dvp; 678 VREF(dvp); 679 return (0); 680 } 681 682 if (flags & ISDOTDOT) { 683 if ((flags & ISLASTCN) && nameiop != LOOKUP) 684 return (EINVAL); 685 VOP_UNLOCK(dvp, 0); 686 de = TAILQ_FIRST(&dd->de_dlist); /* "." */ 687 de = TAILQ_NEXT(de, de_list); /* ".." 
*/ 688 de = de->de_dir; 689 error = devfs_allocv(de, dvp->v_mount, vpp, td); 690 *dm_unlock = 0; 691 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); 692 return (error); 693 } 694 695 DEVFS_DMP_HOLD(dmp); 696 devfs_populate(dmp); 697 if (DEVFS_DMP_DROP(dmp)) { 698 *dm_unlock = 0; 699 sx_xunlock(&dmp->dm_lock); 700 devfs_unmount_final(dmp); 701 return (ENOENT); 702 } 703 dd = dvp->v_data; 704 de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen); 705 while (de == NULL) { /* While(...) so we can use break */ 706 707 if (nameiop == DELETE) 708 return (ENOENT); 709 710 /* 711 * OK, we didn't have an entry for the name we were asked for 712 * so we try to see if anybody can create it on demand. 713 */ 714 pname = devfs_fqpn(specname, dvp, cnp); 715 if (pname == NULL) 716 break; 717 718 cdev = NULL; 719 DEVFS_DMP_HOLD(dmp); 720 sx_xunlock(&dmp->dm_lock); 721 sx_slock(&clone_drain_lock); 722 EVENTHANDLER_INVOKE(dev_clone, 723 td->td_ucred, pname, strlen(pname), &cdev); 724 sx_sunlock(&clone_drain_lock); 725 sx_xlock(&dmp->dm_lock); 726 if (DEVFS_DMP_DROP(dmp)) { 727 *dm_unlock = 0; 728 sx_xunlock(&dmp->dm_lock); 729 devfs_unmount_final(dmp); 730 return (ENOENT); 731 } 732 if (cdev == NULL) 733 break; 734 735 DEVFS_DMP_HOLD(dmp); 736 devfs_populate(dmp); 737 if (DEVFS_DMP_DROP(dmp)) { 738 *dm_unlock = 0; 739 sx_xunlock(&dmp->dm_lock); 740 devfs_unmount_final(dmp); 741 return (ENOENT); 742 } 743 744 dev_lock(); 745 dde = &cdev->si_priv->cdp_dirents[dmp->dm_idx]; 746 if (dde != NULL && *dde != NULL) 747 de = *dde; 748 dev_unlock(); 749 dev_rel(cdev); 750 break; 751 } 752 753 if (de == NULL || de->de_flags & DE_WHITEOUT) { 754 if ((nameiop == CREATE || nameiop == RENAME) && 755 (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) { 756 cnp->cn_flags |= SAVENAME; 757 return (EJUSTRETURN); 758 } 759 return (ENOENT); 760 } 761 762 if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) { 763 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td); 764 if (error) 765 return (error); 766 if 
(*vpp == dvp) { 767 VREF(dvp); 768 *vpp = dvp; 769 return (0); 770 } 771 } 772 error = devfs_allocv(de, dvp->v_mount, vpp, td); 773 *dm_unlock = 0; 774 return (error); 775 } 776 777 static int 778 devfs_lookup(struct vop_lookup_args *ap) 779 { 780 int j; 781 struct devfs_mount *dmp; 782 int dm_unlock; 783 784 dmp = VFSTODEVFS(ap->a_dvp->v_mount); 785 dm_unlock = 1; 786 sx_xlock(&dmp->dm_lock); 787 j = devfs_lookupx(ap, &dm_unlock); 788 if (dm_unlock == 1) 789 sx_xunlock(&dmp->dm_lock); 790 return (j); 791 } 792 793 static int 794 devfs_mknod(struct vop_mknod_args *ap) 795 { 796 struct componentname *cnp; 797 struct vnode *dvp, **vpp; 798 struct thread *td; 799 struct devfs_dirent *dd, *de; 800 struct devfs_mount *dmp; 801 int error; 802 803 /* 804 * The only type of node we should be creating here is a 805 * character device, for anything else return EOPNOTSUPP. 806 */ 807 if (ap->a_vap->va_type != VCHR) 808 return (EOPNOTSUPP); 809 dvp = ap->a_dvp; 810 dmp = VFSTODEVFS(dvp->v_mount); 811 812 cnp = ap->a_cnp; 813 vpp = ap->a_vpp; 814 td = cnp->cn_thread; 815 dd = dvp->v_data; 816 817 error = ENOENT; 818 sx_xlock(&dmp->dm_lock); 819 TAILQ_FOREACH(de, &dd->de_dlist, de_list) { 820 if (cnp->cn_namelen != de->de_dirent->d_namlen) 821 continue; 822 if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name, 823 de->de_dirent->d_namlen) != 0) 824 continue; 825 if (de->de_flags & DE_WHITEOUT) 826 break; 827 goto notfound; 828 } 829 if (de == NULL) 830 goto notfound; 831 de->de_flags &= ~DE_WHITEOUT; 832 error = devfs_allocv(de, dvp->v_mount, vpp, td); 833 return (error); 834 notfound: 835 sx_xunlock(&dmp->dm_lock); 836 return (error); 837 } 838 839 /* ARGSUSED */ 840 static int 841 devfs_open(struct vop_open_args *ap) 842 { 843 struct thread *td = ap->a_td; 844 struct vnode *vp = ap->a_vp; 845 struct cdev *dev = vp->v_rdev; 846 struct file *fp = ap->a_fp; 847 int error; 848 struct cdevsw *dsw; 849 850 if (vp->v_type == VBLK) 851 return (ENXIO); 852 853 if (dev == NULL) 854 return 
(ENXIO); 855 856 /* Make this field valid before any I/O in d_open. */ 857 if (dev->si_iosize_max == 0) 858 dev->si_iosize_max = DFLTPHYS; 859 860 dsw = dev_refthread(dev); 861 if (dsw == NULL) 862 return (ENXIO); 863 864 /* XXX: Special casing of ttys for deadfs. Probably redundant. */ 865 if (dsw->d_flags & D_TTY) 866 vp->v_vflag |= VV_ISTTY; 867 868 VOP_UNLOCK(vp, 0); 869 870 if (fp != NULL) { 871 td->td_fpop = fp; 872 fp->f_data = dev; 873 } 874 if (dsw->d_fdopen != NULL) 875 error = dsw->d_fdopen(dev, ap->a_mode, td, fp); 876 else 877 error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td); 878 td->td_fpop = NULL; 879 880 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 881 882 dev_relthread(dev); 883 884 if (error) 885 return (error); 886 887 #if 0 /* /dev/console */ 888 KASSERT(fp != NULL, 889 ("Could not vnode bypass device on NULL fp")); 890 #else 891 if(fp == NULL) 892 return (error); 893 #endif 894 KASSERT(fp->f_ops == &badfileops, 895 ("Could not vnode bypass device on fdops %p", fp->f_ops)); 896 finit(fp, fp->f_flag, DTYPE_VNODE, dev, &devfs_ops_f); 897 return (error); 898 } 899 900 static int 901 devfs_pathconf(struct vop_pathconf_args *ap) 902 { 903 904 switch (ap->a_name) { 905 case _PC_MAC_PRESENT: 906 #ifdef MAC 907 /* 908 * If MAC is enabled, devfs automatically supports 909 * trivial non-persistant label storage. 910 */ 911 *ap->a_retval = 1; 912 #else 913 *ap->a_retval = 0; 914 #endif 915 return (0); 916 default: 917 return (vop_stdpathconf(ap)); 918 } 919 /* NOTREACHED */ 920 } 921 922 /* ARGSUSED */ 923 static int 924 devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td) 925 { 926 struct cdev *dev; 927 struct cdevsw *dsw; 928 int error; 929 930 error = devfs_fp_check(fp, &dev, &dsw); 931 if (error) 932 return (error); 933 error = dsw->d_poll(dev, events, td); 934 curthread->td_fpop = NULL; 935 dev_relthread(dev); 936 return(error); 937 } 938 939 /* 940 * Print out the contents of a special device vnode. 
941 */ 942 static int 943 devfs_print(struct vop_print_args *ap) 944 { 945 946 printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev)); 947 return (0); 948 } 949 950 /* ARGSUSED */ 951 static int 952 devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td) 953 { 954 struct cdev *dev; 955 int ioflag, error, resid; 956 struct cdevsw *dsw; 957 958 error = devfs_fp_check(fp, &dev, &dsw); 959 if (error) 960 return (error); 961 resid = uio->uio_resid; 962 ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT); 963 if (ioflag & O_DIRECT) 964 ioflag |= IO_DIRECT; 965 966 if ((flags & FOF_OFFSET) == 0) 967 uio->uio_offset = fp->f_offset; 968 969 error = dsw->d_read(dev, uio, ioflag); 970 if (uio->uio_resid != resid || (error == 0 && resid != 0)) 971 vfs_timestamp(&dev->si_atime); 972 curthread->td_fpop = NULL; 973 dev_relthread(dev); 974 975 if ((flags & FOF_OFFSET) == 0) 976 fp->f_offset = uio->uio_offset; 977 fp->f_nextoff = uio->uio_offset; 978 return (error); 979 } 980 981 static int 982 devfs_readdir(struct vop_readdir_args *ap) 983 { 984 int error; 985 struct uio *uio; 986 struct dirent *dp; 987 struct devfs_dirent *dd; 988 struct devfs_dirent *de; 989 struct devfs_mount *dmp; 990 off_t off, oldoff; 991 int *tmp_ncookies = NULL; 992 993 if (ap->a_vp->v_type != VDIR) 994 return (ENOTDIR); 995 996 uio = ap->a_uio; 997 if (uio->uio_offset < 0) 998 return (EINVAL); 999 1000 /* 1001 * XXX: This is a temporary hack to get around this filesystem not 1002 * supporting cookies. We store the location of the ncookies pointer 1003 * in a temporary variable before calling vfs_subr.c:vfs_read_dirent() 1004 * and set the number of cookies to 0. We then set the pointer to 1005 * NULL so that vfs_read_dirent doesn't try to call realloc() on 1006 * ap->a_cookies. Later in this function, we restore the ap->a_ncookies 1007 * pointer to its original location before returning to the caller. 
1008 */ 1009 if (ap->a_ncookies != NULL) { 1010 tmp_ncookies = ap->a_ncookies; 1011 *ap->a_ncookies = 0; 1012 ap->a_ncookies = NULL; 1013 } 1014 1015 dmp = VFSTODEVFS(ap->a_vp->v_mount); 1016 sx_xlock(&dmp->dm_lock); 1017 DEVFS_DMP_HOLD(dmp); 1018 devfs_populate(dmp); 1019 if (DEVFS_DMP_DROP(dmp)) { 1020 sx_xunlock(&dmp->dm_lock); 1021 devfs_unmount_final(dmp); 1022 if (tmp_ncookies != NULL) 1023 ap->a_ncookies = tmp_ncookies; 1024 return (EIO); 1025 } 1026 error = 0; 1027 de = ap->a_vp->v_data; 1028 off = 0; 1029 oldoff = uio->uio_offset; 1030 TAILQ_FOREACH(dd, &de->de_dlist, de_list) { 1031 KASSERT(dd->de_cdp != (void *)0xdeadc0de, ("%s %d\n", __func__, __LINE__)); 1032 if (dd->de_flags & DE_WHITEOUT) 1033 continue; 1034 if (dd->de_dirent->d_type == DT_DIR) 1035 de = dd->de_dir; 1036 else 1037 de = dd; 1038 dp = dd->de_dirent; 1039 if (dp->d_reclen > uio->uio_resid) 1040 break; 1041 dp->d_fileno = de->de_inode; 1042 if (off >= uio->uio_offset) { 1043 error = vfs_read_dirent(ap, dp, off); 1044 if (error) 1045 break; 1046 } 1047 off += dp->d_reclen; 1048 } 1049 sx_xunlock(&dmp->dm_lock); 1050 uio->uio_offset = off; 1051 1052 /* 1053 * Restore ap->a_ncookies if it wasn't originally NULL in the first 1054 * place. 
1055 */ 1056 if (tmp_ncookies != NULL) 1057 ap->a_ncookies = tmp_ncookies; 1058 1059 return (error); 1060 } 1061 1062 static int 1063 devfs_readlink(struct vop_readlink_args *ap) 1064 { 1065 struct devfs_dirent *de; 1066 1067 de = ap->a_vp->v_data; 1068 return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio)); 1069 } 1070 1071 static int 1072 devfs_reclaim(struct vop_reclaim_args *ap) 1073 { 1074 struct vnode *vp = ap->a_vp; 1075 struct devfs_dirent *de; 1076 struct cdev *dev; 1077 1078 mtx_lock(&devfs_de_interlock); 1079 de = vp->v_data; 1080 if (de != NULL) { 1081 de->de_vnode = NULL; 1082 vp->v_data = NULL; 1083 } 1084 mtx_unlock(&devfs_de_interlock); 1085 1086 vnode_destroy_vobject(vp); 1087 1088 VI_LOCK(vp); 1089 dev_lock(); 1090 dev = vp->v_rdev; 1091 vp->v_rdev = NULL; 1092 1093 if (dev == NULL) { 1094 dev_unlock(); 1095 VI_UNLOCK(vp); 1096 return (0); 1097 } 1098 1099 dev->si_usecount -= vp->v_usecount; 1100 dev_unlock(); 1101 VI_UNLOCK(vp); 1102 dev_rel(dev); 1103 return (0); 1104 } 1105 1106 static int 1107 devfs_remove(struct vop_remove_args *ap) 1108 { 1109 struct vnode *vp = ap->a_vp; 1110 struct devfs_dirent *dd; 1111 struct devfs_dirent *de; 1112 struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount); 1113 1114 sx_xlock(&dmp->dm_lock); 1115 dd = ap->a_dvp->v_data; 1116 de = vp->v_data; 1117 if (de->de_cdp == NULL) { 1118 TAILQ_REMOVE(&dd->de_dlist, de, de_list); 1119 devfs_delete(dmp, de, 1); 1120 } else { 1121 de->de_flags |= DE_WHITEOUT; 1122 } 1123 sx_xunlock(&dmp->dm_lock); 1124 return (0); 1125 } 1126 1127 /* 1128 * Revoke is called on a tty when a terminal session ends. The vnode 1129 * is orphaned by setting v_op to deadfs so we need to let go of it 1130 * as well so that we create a new one next time around. 
 *
 */
/*
 * Tear down every vnode associated with this device, for a forced revoke
 * (e.g. revoke(2) on a tty).  The caller must pass REVOKEALL; partial
 * revocation is not supported here.
 */
static int
devfs_revoke(struct vop_revoke_args *ap)
{
	struct vnode *vp = ap->a_vp, *vp2;
	struct cdev *dev;
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	int i;

	KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));

	dev = vp->v_rdev;
	cdp = dev->si_priv;

	/*
	 * Pin the cdev_priv so it cannot be freed while we walk its
	 * dirent table below with the locks dropped.
	 */
	dev_lock();
	cdp->cdp_inuse++;
	dev_unlock();

	/* Kill the vnode we were called on first; vhold keeps it around. */
	vhold(vp);
	vgone(vp);
	vdrop(vp);

	VOP_UNLOCK(vp,0);
 loop:
	for (;;) {
		/*
		 * Scan all dirent slots for this device looking for a live
		 * vnode.  Lock order: devfs_de_interlock, then dev_lock;
		 * both are dropped before sleeping in vget().
		 */
		mtx_lock(&devfs_de_interlock);
		dev_lock();
		vp2 = NULL;
		for (i = 0; i <= cdp->cdp_maxdirent; i++) {
			de = cdp->cdp_dirents[i];
			if (de == NULL)
				continue;

			vp2 = de->de_vnode;
			if (vp2 != NULL) {
				dev_unlock();
				VI_LOCK(vp2);
				mtx_unlock(&devfs_de_interlock);
				/*
				 * vget() can fail if the vnode is being
				 * recycled under us; restart the scan.
				 */
				if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
				    curthread))
					goto loop;
				vhold(vp2);
				vgone(vp2);
				vdrop(vp2);
				vput(vp2);
				break;
			}
		}
		if (vp2 != NULL) {
			/* We revoked one vnode; rescan for more. */
			continue;
		}
		dev_unlock();
		mtx_unlock(&devfs_de_interlock);
		break;
	}
	/*
	 * Drop our use count; if the device is already inactive and we
	 * hold the last reference, unhook it and release it.
	 */
	dev_lock();
	cdp->cdp_inuse--;
	if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) {
		TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
		dev_unlock();
		dev_rel(&cdp->cdp_c);
	} else
		dev_unlock();

	/* Re-lock the (now dead) vnode for the caller, as VOP rules require. */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	return (0);
}

/*
 * Ioctl on a devfs directory vnode: refresh the directory contents and
 * hand the request to the devfs rules machinery.  Returns ENOENT if the
 * mount went away while we slept in devfs_populate().
 */
static int
devfs_rioctl(struct vop_ioctl_args *ap)
{
	int error;
	struct devfs_mount *dmp;

	dmp = VFSTODEVFS(ap->a_vp->v_mount);
	sx_xlock(&dmp->dm_lock);
	DEVFS_DMP_HOLD(dmp);
	devfs_populate(dmp);
	if (DEVFS_DMP_DROP(dmp)) {
		/* Last reference: we must finish the unmount ourselves. */
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
		return (ENOENT);
	}
	error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td);
	sx_xunlock(&dmp->dm_lock);
	return (error);
}

/*
 * Read on a devfs directory vnode is serviced as a readdir; plain reads
 * of non-directories through this vector are rejected.
 */
static int
devfs_rread(struct vop_read_args *ap)
{

	if (ap->a_vp->v_type != VDIR)
		return (EINVAL);
	return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL));
}

/*
 * Set attributes on a devfs node.  Only owner/group, mode and the
 * atime/mtime timestamps may be changed; any attempt to set other
 * fields (type, nlink, fsid, ...) returns EINVAL.  Permission checks
 * follow the usual chown/chmod/utimes rules via priv_check().
 */
static int
devfs_setattr(struct vop_setattr_args *ap)
{
	struct devfs_dirent *de;
	struct vattr *vap;
	struct vnode *vp;
	int c, error;
	uid_t uid;
	gid_t gid;

	vap = ap->a_vap;
	vp = ap->a_vp;
	/* Reject any attribute this filesystem does not allow changing. */
	if ((vap->va_type != VNON) ||
	    (vap->va_nlink != VNOVAL) ||
	    (vap->va_fsid != VNOVAL) ||
	    (vap->va_fileid != VNOVAL) ||
	    (vap->va_blocksize != VNOVAL) ||
	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
	    (vap->va_rdev != VNOVAL) ||
	    ((int)vap->va_bytes != VNOVAL) ||
	    (vap->va_gen != VNOVAL)) {
		return (EINVAL);
	}

	de = vp->v_data;
	/* Directories keep their attributes on the "." dirent. */
	if (vp->v_type == VDIR)
		de = de->de_dir;

	/* 'c' records whether anything changed, to update ctime below. */
	error = c = 0;
	if (vap->va_uid == (uid_t)VNOVAL)
		uid = de->de_uid;
	else
		uid = vap->va_uid;
	if (vap->va_gid == (gid_t)VNOVAL)
		gid = de->de_gid;
	else
		gid = vap->va_gid;
	if (uid != de->de_uid || gid != de->de_gid) {
		/*
		 * chown: the caller must either own the node and only be
		 * changing group to one it is a member of, or hold
		 * PRIV_VFS_CHOWN.
		 */
		if ((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid ||
		    (gid != de->de_gid && !groupmember(gid, ap->a_cred))) {
			error = priv_check(ap->a_td, PRIV_VFS_CHOWN);
			if (error)
				return (error);
		}
		de->de_uid = uid;
		de->de_gid = gid;
		c = 1;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		/* chmod: non-owners need PRIV_VFS_ADMIN. */
		if (ap->a_cred->cr_uid != de->de_uid) {
			error = priv_check(ap->a_td, PRIV_VFS_ADMIN);
			if (error)
				return (error);
		}
		de->de_mode = vap->va_mode;
		c = 1;
	}

	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
		/* See the comment in ufs_vnops::ufs_setattr(). */
		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, ap->a_td)) &&
		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
		    (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, ap->a_td))))
			return (error);
		/*
		 * Character devices keep timestamps on the cdev itself;
		 * everything else keeps them on the dirent.
		 */
		if (vap->va_atime.tv_sec != VNOVAL) {
			if (vp->v_type == VCHR)
				vp->v_rdev->si_atime = vap->va_atime;
			else
				de->de_atime = vap->va_atime;
		}
		if (vap->va_mtime.tv_sec != VNOVAL) {
			if (vp->v_type == VCHR)
				vp->v_rdev->si_mtime = vap->va_mtime;
			else
				de->de_mtime = vap->va_mtime;
		}
		c = 1;
	}

	if (c) {
		/*
		 * NOTE(review): the VCHR branch stamps si_ctime but the
		 * dirent branch stamps de_mtime — presumably de_mtime
		 * doubles as the change time here; confirm this asymmetry
		 * is intentional.
		 */
		if (vp->v_type == VCHR)
			vfs_timestamp(&vp->v_rdev->si_ctime);
		else
			vfs_timestamp(&de->de_mtime);
	}
	return (0);
}

#ifdef MAC
/*
 * MAC relabel: let the framework update both the vnode label and the
 * backing devfs dirent so the label survives vnode recycling.
 */
static int
devfs_setlabel(struct vop_setlabel_args *ap)
{
	struct vnode *vp;
	struct devfs_dirent *de;

	vp = ap->a_vp;
	de = vp->v_data;

	mac_vnode_relabel(ap->a_cred, vp, ap->a_label);
	mac_devfs_update(vp->v_mount, de, vp);

	return (0);
}
#endif

/* fo_stat for devfs files: defer to the generic vnode fileops. */
static int
devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred, struct thread *td)
{

	return (vnops.fo_stat(fp, sb, cred, td));
}

/*
 * Create a symbolic link in a devfs directory.  Requires
 * PRIV_DEVFS_SYMLINK.  The link target is copied into a freshly
 * allocated de_symlink buffer owned by the new dirent.
 */
static int
devfs_symlink(struct vop_symlink_args *ap)
{
	int i, error;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp;
	struct thread *td;

	td = ap->a_cnp->cn_thread;
	KASSERT(td == curthread, ("devfs_symlink: td != curthread"));

	error = priv_check(td, PRIV_DEVFS_SYMLINK);
	if (error)
		return(error);
	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
	dd = ap->a_dvp->v_data;
	de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen);
	de->de_uid = 0;
	de->de_gid = 0;
	de->de_mode = 0755;
	de->de_inode = alloc_unr(devfs_inos);
	de->de_dirent->d_type = DT_LNK;
	/* Include the terminating NUL in the copied target. */
	i = strlen(ap->a_target) + 1;
	de->de_symlink = malloc(i, M_DEVFS, M_WAITOK);
	bcopy(ap->a_target, de->de_symlink, i);
	/* dm_lock is handed off to devfs_allocv(), which releases it. */
	sx_xlock(&dmp->dm_lock);
#ifdef MAC
	mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de);
#endif
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	return (devfs_allocv(de, ap->a_dvp->v_mount, ap->a_vpp, td));
}

/* fo_truncate for devfs files: defer to the generic vnode fileops. */
static int
devfs_truncate_f(struct file *fp, off_t length, struct ucred *cred, struct thread *td)
{

	return (vnops.fo_truncate(fp, length, cred, td));
}

/*
 * fo_write for devfs files: validate the fp/cdev pairing, translate the
 * file flags into driver ioflags, call the driver's d_write, and update
 * the device timestamps and the file offset afterwards.
 */
/* ARGSUSED */
static int
devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
{
	struct cdev *dev;
	int error, ioflag, resid;
	struct cdevsw *dsw;

	/* Also sets curthread->td_fpop on success; cleared below. */
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC);
	if (ioflag & O_DIRECT)
		ioflag |= IO_DIRECT;
	/* Without FOF_OFFSET, write at (and advance) the file's offset. */
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	resid = uio->uio_resid;

	error = dsw->d_write(dev, uio, ioflag);
	/* Any progress (or a zero-length success) updates mtime/ctime. */
	if (uio->uio_resid != resid || (error == 0 && resid != 0)) {
		vfs_timestamp(&dev->si_ctime);
		dev->si_mtime = dev->si_ctime;
	}
	curthread->td_fpop = NULL;
	dev_relthread(dev);

	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	return (error);
}

/*
 * Map a struct cdev to the userland dev_t; devfs uses the device's
 * inode number as its udev.  NULL maps to NODEV.
 */
dev_t
dev2udev(struct cdev *x)
{
	if (x == NULL)
		return (NODEV);
	return (x->si_priv->cdp_inode);
}

/* File operations used for open devfs character devices. */
static struct fileops devfs_ops_f = {
	.fo_read =	devfs_read_f,
	.fo_write =	devfs_write_f,
	.fo_truncate =	devfs_truncate_f,
	.fo_ioctl =	devfs_ioctl_f,
	.fo_poll =	devfs_poll_f,
	.fo_kqfilter =	devfs_kqfilter_f,
	.fo_stat =	devfs_stat_f,
	.fo_close =	devfs_close_f,
	.fo_flags =	DFLAG_PASSABLE | DFLAG_SEEKABLE
};

/* Vnode operations for devfs directories and symlinks. */
static struct vop_vector devfs_vnodeops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		devfs_access,
	.vop_getattr =		devfs_getattr,
	.vop_ioctl =		devfs_rioctl,
	.vop_lookup =		devfs_lookup,
	.vop_mknod =		devfs_mknod,
	.vop_pathconf =		devfs_pathconf,
	.vop_read =		devfs_rread,
	.vop_readdir =		devfs_readdir,
	.vop_readlink =		devfs_readlink,
	.vop_reclaim =		devfs_reclaim,
	.vop_remove =		devfs_remove,
	.vop_revoke =		devfs_revoke,
	.vop_setattr =		devfs_setattr,
#ifdef MAC
	.vop_setlabel =		devfs_setlabel,
#endif
	.vop_symlink =		devfs_symlink,
};

/*
 * Vnode operations for devfs device (special) nodes.  I/O normally goes
 * through devfs_ops_f instead; the VOP_PANIC entries are operations that
 * must never be reached on a special vnode.
 */
static struct vop_vector devfs_specops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		devfs_access,
	.vop_advlock =		devfs_advlock,
	.vop_bmap =		VOP_PANIC,
	.vop_close =		devfs_close,
	.vop_create =		VOP_PANIC,
	.vop_fsync =		devfs_fsync,
	.vop_getattr =		devfs_getattr,
	.vop_lease =		VOP_NULL,
	.vop_link =		VOP_PANIC,
	.vop_mkdir =		VOP_PANIC,
	.vop_mknod =		VOP_PANIC,
	.vop_open =		devfs_open,
	.vop_pathconf =		devfs_pathconf,
	.vop_print =		devfs_print,
	.vop_read =		VOP_PANIC,
	.vop_readdir =		VOP_PANIC,
	.vop_readlink =		VOP_PANIC,
	.vop_reallocblks =	VOP_PANIC,
	.vop_reclaim =		devfs_reclaim,
	.vop_remove =		devfs_remove,
	.vop_rename =		VOP_PANIC,
	.vop_revoke =		devfs_revoke,
	.vop_rmdir =		VOP_PANIC,
	.vop_setattr =		devfs_setattr,
#ifdef MAC
	.vop_setlabel =		devfs_setlabel,
#endif
	.vop_strategy =		VOP_PANIC,
	.vop_symlink =		VOP_PANIC,
	.vop_write =		VOP_PANIC,
};

/*
 * Our calling convention to the device drivers used to be that we passed
 * vnode.h IO_* flags to read()/write(), but we're moving to fcntl.h O_
 * flags instead since that's what open(), close() and ioctl() takes and
 * we don't really want vnode.h in device drivers.
 * We solved the source compatibility by redefining some vnode flags to
 * be the same as the fcntl ones and by sending down the bitwise OR of
 * the respective fcntl/vnode flags.  These CTASSERTS make sure nobody
 * pulls the rug out under this.
 */
CTASSERT(O_NONBLOCK == IO_NDELAY);
CTASSERT(O_FSYNC == IO_SYNC);