/*-
 * Copyright (c) 2000-2004
 *	Poul-Henning Kamp. All rights reserved.
 * Copyright (c) 1989, 1992-1993, 1995
 *	The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kernfs_vnops.c	8.15 (Berkeley) 5/21/95
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43
 *
 * $FreeBSD$
 */

/*
 * TODO:
 *	remove empty directories
 *	mkdir: want it ?
 */

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/ttycom.h>
#include <sys/unistd.h>
#include <sys/vnode.h>

static struct vop_vector devfs_vnodeops;
static struct vop_vector devfs_specops;
static struct fileops devfs_ops_f;

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

#include <security/mac/mac_framework.h>

static MALLOC_DEFINE(M_CDEVPDATA, "DEVFSP", "Metainfo for cdev-fp data");

struct mtx	devfs_de_interlock;
MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF);
struct sx	clone_drain_lock;
SX_SYSINIT(clone_drain_lock, &clone_drain_lock, "clone events drain lock");
struct mtx	cdevpriv_mtx;
MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF);

static int
devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp)
{

	*dswp = devvn_refthread(fp->f_vnode, devp);
	if (*devp != fp->f_data) {
		if (*dswp != NULL)
			dev_relthread(*devp);
		return (ENXIO);
	}
	KASSERT((*devp)->si_refcount > 0,
	    ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp)));
	if (*dswp == NULL)
		return (ENXIO);
	curthread->td_fpop = fp;
	return (0);
}

int
devfs_get_cdevpriv(void **datap)
{
	struct file *fp;
	struct cdev_privdata *p;
	int error;

	fp = curthread->td_fpop;
	if (fp == NULL)
		return (EBADF);
	p = fp->f_cdevpriv;
	if (p != NULL) {
		error = 0;
		*datap = p->cdpd_data;
	} else
		error = ENOENT;
	return (error);
}

int
devfs_set_cdevpriv(void *priv, cdevpriv_dtr_t priv_dtr)
{
	struct file *fp;
	struct cdev_priv *cdp;
	struct cdev_privdata *p;
	int error;

	fp = curthread->td_fpop;
	if (fp == NULL)
		return (ENOENT);
	cdp = cdev2priv((struct cdev *)fp->f_data);
	p = malloc(sizeof(struct cdev_privdata), M_CDEVPDATA, M_WAITOK);
	p->cdpd_data = priv;
	p->cdpd_dtr = priv_dtr;
	p->cdpd_fp = fp;
	mtx_lock(&cdevpriv_mtx);
	if (fp->f_cdevpriv == NULL) {
		LIST_INSERT_HEAD(&cdp->cdp_fdpriv, p, cdpd_list);
		fp->f_cdevpriv = p;
		mtx_unlock(&cdevpriv_mtx);
		error = 0;
	} else {
		mtx_unlock(&cdevpriv_mtx);
		free(p, M_CDEVPDATA);
		error = EBUSY;
	}
	return (error);
}

void
devfs_destroy_cdevpriv(struct cdev_privdata *p)
{

	mtx_assert(&cdevpriv_mtx, MA_OWNED);
	p->cdpd_fp->f_cdevpriv = NULL;
	LIST_REMOVE(p, cdpd_list);
	mtx_unlock(&cdevpriv_mtx);
	(p->cdpd_dtr)(p->cdpd_data);
	free(p, M_CDEVPDATA);
}

void
devfs_fpdrop(struct file *fp)
{
	struct cdev_privdata *p;

	mtx_lock(&cdevpriv_mtx);
	if ((p = fp->f_cdevpriv) == NULL) {
		mtx_unlock(&cdevpriv_mtx);
		return;
	}
	devfs_destroy_cdevpriv(p);
}

void
devfs_clear_cdevpriv(void)
{
	struct file *fp;

	fp = curthread->td_fpop;
	if (fp == NULL)
		return;
	devfs_fpdrop(fp);
}
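
/*
 * Sketch (not part of the original file): the cdevpriv interface above is
 * what a driver uses to hang per-open(2) state off the file descriptor
 * rather than off the shared struct cdev.  A hypothetical driver would
 * pair the calls roughly like this in its own cdevsw methods; "mydev_*"
 * and struct mydev_softc are illustrative names, not part of devfs:
 *
 *	static void
 *	mydev_dtr(void *data)
 *	{
 *		free(data, M_TEMP);
 *	}
 *
 *	static int
 *	mydev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
 *	{
 *		void *priv;
 *
 *		priv = malloc(sizeof(struct mydev_softc), M_TEMP,
 *		    M_WAITOK | M_ZERO);
 *		return (devfs_set_cdevpriv(priv, mydev_dtr));
 *	}
 *
 *	static int
 *	mydev_read(struct cdev *dev, struct uio *uio, int ioflag)
 *	{
 *		void *priv;
 *		int error;
 *
 *		error = devfs_get_cdevpriv(&priv);
 *		if (error != 0)
 *			return (error);
 *		return (0);
 *	}
 *
 * The destructor passed to devfs_set_cdevpriv() runs when the descriptor
 * is finally dropped, or when devfs_clear_cdevpriv() is called.
 */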

static int
devfs_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct devfs_mount *dmp;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	struct devfs_dirent *dd, *de;
	int i, error;

	dmp = VFSTODEVFS(vp->v_mount);
	i = *buflen;
	dd = vp->v_data;
	error = 0;

	sx_xlock(&dmp->dm_lock);

	if (vp->v_type == VCHR) {
		i -= strlen(dd->de_cdp->cdp_c.si_name);
		if (i < 0) {
			error = ENOMEM;
			goto finished;
		}
		bcopy(dd->de_cdp->cdp_c.si_name, buf + i,
		    strlen(dd->de_cdp->cdp_c.si_name));
		de = dd->de_dir;
	} else if (vp->v_type == VDIR) {
		if (dd == dmp->dm_rootdir) {
			*dvp = vp;
			vhold(*dvp);
			goto finished;
		}
		i -= dd->de_dirent->d_namlen;
		if (i < 0) {
			error = ENOMEM;
			goto finished;
		}
		bcopy(dd->de_dirent->d_name, buf + i,
		    dd->de_dirent->d_namlen);
		de = dd;
	} else {
		error = ENOENT;
		goto finished;
	}
	*buflen = i;
	de = TAILQ_FIRST(&de->de_dlist);	/* "." */
	de = TAILQ_NEXT(de, de_list);		/* ".." */
	de = de->de_dir;
	mtx_lock(&devfs_de_interlock);
	*dvp = de->de_vnode;
	if (*dvp != NULL) {
		VI_LOCK(*dvp);
		mtx_unlock(&devfs_de_interlock);
		vholdl(*dvp);
		VI_UNLOCK(*dvp);
	} else {
		mtx_unlock(&devfs_de_interlock);
		error = ENOENT;
	}
finished:
	sx_xunlock(&dmp->dm_lock);
	return (error);
}

/*
 * Construct the fully qualified path name relative to the mountpoint
 */
static char *
devfs_fqpn(char *buf, struct vnode *dvp, struct componentname *cnp)
{
	int i;
	struct devfs_dirent *de, *dd;
	struct devfs_mount *dmp;

	dmp = VFSTODEVFS(dvp->v_mount);
	dd = dvp->v_data;
	i = SPECNAMELEN;
	buf[i] = '\0';
	i -= cnp->cn_namelen;
	if (i < 0)
		return (NULL);
	bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen);
	de = dd;
	while (de != dmp->dm_rootdir) {
		i--;
		if (i < 0)
			return (NULL);
		buf[i] = '/';
		i -= de->de_dirent->d_namlen;
		if (i < 0)
			return (NULL);
		bcopy(de->de_dirent->d_name, buf + i,
		    de->de_dirent->d_namlen);
		de = TAILQ_FIRST(&de->de_dlist);	/* "." */
		de = TAILQ_NEXT(de, de_list);		/* ".." */
		de = de->de_dir;
	}
	return (buf + i);
}

static int
devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp,
    struct devfs_dirent *de)
{
	int not_found;

	not_found = 0;
	if (de->de_flags & DE_DOOMED)
		not_found = 1;
	if (DEVFS_DE_DROP(de)) {
		KASSERT(not_found == 1, ("DEVFS de dropped but not doomed"));
		devfs_dirent_free(de);
	}
	if (DEVFS_DMP_DROP(dmp)) {
		KASSERT(not_found == 1,
		    ("DEVFS mount struct freed before dirent"));
		not_found = 2;
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
	}
	if (not_found == 1 || (drop_dm_lock && not_found != 2))
		sx_unlock(&dmp->dm_lock);
	return (not_found);
}

static void
devfs_insmntque_dtr(struct vnode *vp, void *arg)
{
	struct devfs_dirent *de;

	de = (struct devfs_dirent *)arg;
	mtx_lock(&devfs_de_interlock);
	vp->v_data = NULL;
	de->de_vnode = NULL;
	mtx_unlock(&devfs_de_interlock);
	vgone(vp);
	vput(vp);
}

/*
 * devfs_allocv shall be entered with dmp->dm_lock held, and it drops
 * it on return.
 */
int
devfs_allocv(struct devfs_dirent *de, struct mount *mp, struct vnode **vpp, struct thread *td)
{
	int error;
	struct vnode *vp;
	struct cdev *dev;
	struct devfs_mount *dmp;

	KASSERT(td == curthread, ("devfs_allocv: td != curthread"));
	dmp = VFSTODEVFS(mp);
	if (de->de_flags & DE_DOOMED) {
		sx_xunlock(&dmp->dm_lock);
		return (ENOENT);
	}
	DEVFS_DE_HOLD(de);
	DEVFS_DMP_HOLD(dmp);
	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		VI_LOCK(vp);
		mtx_unlock(&devfs_de_interlock);
		sx_xunlock(&dmp->dm_lock);
		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td);
		sx_xlock(&dmp->dm_lock);
		if (devfs_allocv_drop_refs(0, dmp, de)) {
			if (error == 0)
				vput(vp);
			return (ENOENT);
		}
		else if (error) {
			sx_xunlock(&dmp->dm_lock);
			return (error);
		}
		sx_xunlock(&dmp->dm_lock);
		*vpp = vp;
		return (0);
	}
	mtx_unlock(&devfs_de_interlock);
	if (de->de_dirent->d_type == DT_CHR) {
		if (!(de->de_cdp->cdp_flags & CDP_ACTIVE)) {
			devfs_allocv_drop_refs(1, dmp, de);
			return (ENOENT);
		}
		dev = &de->de_cdp->cdp_c;
	} else {
		dev = NULL;
	}
	error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp);
	if (error != 0) {
		devfs_allocv_drop_refs(1, dmp, de);
		printf("devfs_allocv: failed to allocate new vnode\n");
		return (error);
	}

	if (de->de_dirent->d_type == DT_CHR) {
		vp->v_type = VCHR;
		VI_LOCK(vp);
		dev_lock();
		dev_refl(dev);
		/* XXX: v_rdev should be protected by the vnode lock */
		vp->v_rdev = dev;
		KASSERT(vp->v_usecount == 1,
		    ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount));
		dev->si_usecount += vp->v_usecount;
		dev_unlock();
		VI_UNLOCK(vp);
		vp->v_op = &devfs_specops;
	} else if (de->de_dirent->d_type == DT_DIR) {
		vp->v_type = VDIR;
	} else if (de->de_dirent->d_type == DT_LNK) {
		vp->v_type = VLNK;
	} else {
		vp->v_type = VBAD;
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
	mtx_lock(&devfs_de_interlock);
	vp->v_data = de;
	de->de_vnode = vp;
	mtx_unlock(&devfs_de_interlock);
	error = insmntque1(vp, mp, devfs_insmntque_dtr, de);
	if (error != 0) {
		(void) devfs_allocv_drop_refs(1, dmp, de);
		return (error);
	}
	if (devfs_allocv_drop_refs(0, dmp, de)) {
		vput(vp);
		return (ENOENT);
	}
#ifdef MAC
	mac_devfs_vnode_associate(mp, de, vp);
#endif
	sx_xunlock(&dmp->dm_lock);
	*vpp = vp;
	return (0);
}

static int
devfs_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *de;
	int error;

	de = vp->v_data;
	if (vp->v_type == VDIR)
		de = de->de_dir;

	error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid,
	    ap->a_accmode, ap->a_cred, NULL);
	if (!error)
		return (error);
	if (error != EACCES)
		return (error);
	/* We do, however, allow access to the controlling terminal */
	if (!(ap->a_td->td_proc->p_flag & P_CONTROLT))
		return (error);
	if (ap->a_td->td_proc->p_session->s_ttyvp == de->de_vnode)
		return (0);
	return (error);
}

/* ARGSUSED */
static int
devfs_advlock(struct vop_advlock_args *ap)
{

	return (ap->a_flags & F_FLOCK ?
	    EOPNOTSUPP : EINVAL);
}

/* ARGSUSED */
static int
devfs_close(struct vop_close_args *ap)
{
	struct vnode *vp = ap->a_vp, *oldvp;
	struct thread *td = ap->a_td;
	struct cdev *dev = vp->v_rdev;
	struct cdevsw *dsw;
	int vp_locked, error;

	/*
	 * Hack: a tty device that is a controlling terminal
	 * has a reference from the session structure.
	 * We cannot easily tell that a character device is
	 * a controlling terminal, unless it is the closing
	 * process' controlling terminal. In that case,
	 * if the reference count is 2 (this last descriptor
	 * plus the session), release the reference from the session.
	 */
	oldvp = NULL;
	sx_xlock(&proctree_lock);
	if (td && vp == td->td_proc->p_session->s_ttyvp) {
		SESS_LOCK(td->td_proc->p_session);
		VI_LOCK(vp);
		if (count_dev(dev) == 2 && (vp->v_iflag & VI_DOOMED) == 0) {
			td->td_proc->p_session->s_ttyvp = NULL;
			oldvp = vp;
		}
		VI_UNLOCK(vp);
		SESS_UNLOCK(td->td_proc->p_session);
	}
	sx_xunlock(&proctree_lock);
	if (oldvp != NULL)
		vrele(oldvp);
	/*
	 * We do not want to really close the device if it
	 * is still in use unless we are trying to close it
	 * forcibly. Since every use (buffer, vnode, swap, cmap)
	 * holds a reference to the vnode, and because we mark
	 * any other vnodes that alias this device, when the
	 * sum of the reference counts on all the aliased
	 * vnodes descends to one, we are on last close.
	 */
	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		/* Forced close. */
	} else if (dsw->d_flags & D_TRACKCLOSE) {
		/* Keep device updated on status. */
	} else if (count_dev(dev) > 1) {
		VI_UNLOCK(vp);
		dev_relthread(dev);
		return (0);
	}
	vholdl(vp);
	VI_UNLOCK(vp);
	vp_locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	KASSERT(dev->si_refcount > 0,
	    ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
	error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td);
	dev_relthread(dev);
	vn_lock(vp, vp_locked | LK_RETRY);
	vdrop(vp);
	return (error);
}
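
/*
 * Sketch (not part of the original file): the D_TRACKCLOSE check above is
 * the hook a driver uses when it wants its d_close entry point invoked on
 * every close(2), not only on the last close of the device.  A hypothetical
 * driver would request that behaviour in its cdevsw, e.g.:
 *
 *	static struct cdevsw mydev_cdevsw = {
 *		.d_version =	D_VERSION,
 *		.d_flags =	D_TRACKCLOSE,
 *		.d_open =	mydev_open,
 *		.d_close =	mydev_close,
 *		.d_read =	mydev_read,
 *		.d_name =	"mydev",
 *	};
 *
 * Without the flag, devfs_close() simply returns once count_dev() shows
 * other references, and d_close only runs on the final close.
 */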

static int
devfs_close_f(struct file *fp, struct thread *td)
{
	int error;
	struct file *fpop;

	fpop = td->td_fpop;
	td->td_fpop = fp;
	error = vnops.fo_close(fp, td);
	td->td_fpop = fpop;
	return (error);
}

/* ARGSUSED */
static int
devfs_fsync(struct vop_fsync_args *ap)
{
	if (!vn_isdisk(ap->a_vp, NULL))
		return (0);

	return (vop_stdfsync(ap));
}

static int
devfs_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	int error = 0;
	struct devfs_dirent *de;
	struct cdev *dev;

	de = vp->v_data;
	KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp));
	if (vp->v_type == VDIR) {
		de = de->de_dir;
		KASSERT(de != NULL,
		    ("Null dir dirent in devfs_getattr vp=%p", vp));
	}
	vap->va_uid = de->de_uid;
	vap->va_gid = de->de_gid;
	vap->va_mode = de->de_mode;
	if (vp->v_type == VLNK)
		vap->va_size = strlen(de->de_symlink);
	else if (vp->v_type == VDIR)
		vap->va_size = vap->va_bytes = DEV_BSIZE;
	else
		vap->va_size = 0;
	if (vp->v_type != VDIR)
		vap->va_bytes = 0;
	vap->va_blocksize = DEV_BSIZE;
	vap->va_type = vp->v_type;

#define fix(aa)							\
	do {							\
		if ((aa).tv_sec <= 3600) {			\
			(aa).tv_sec = boottime.tv_sec;		\
			(aa).tv_nsec = boottime.tv_usec * 1000;	\
		}						\
	} while (0)

	if (vp->v_type != VCHR) {
		fix(de->de_atime);
		vap->va_atime = de->de_atime;
		fix(de->de_mtime);
		vap->va_mtime = de->de_mtime;
		fix(de->de_ctime);
		vap->va_ctime = de->de_ctime;
	} else {
		dev = vp->v_rdev;
		fix(dev->si_atime);
		vap->va_atime = dev->si_atime;
		fix(dev->si_mtime);
		vap->va_mtime = dev->si_mtime;
		fix(dev->si_ctime);
		vap->va_ctime = dev->si_ctime;

		vap->va_rdev = cdev2priv(dev)->cdp_inode;
	}
	vap->va_gen = 0;
	vap->va_flags = 0;
	vap->va_filerev = 0;
	vap->va_nlink = de->de_links;
	vap->va_fileid = de->de_inode;

	return (error);
}

/* ARGSUSED */
static int
devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struct thread *td)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	struct vnode *vp;
	struct vnode *vpold;
	int error, i;
	const char *p;
	struct fiodgname_arg *fgn;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);

	if (com == FIODTYPE) {
		*(int *)data = dsw->d_flags & D_TYPEMASK;
		td->td_fpop = fpop;
		dev_relthread(dev);
		return (0);
	} else if (com == FIODGNAME) {
		fgn = data;
		p = devtoname(dev);
		i = strlen(p) + 1;
		if (i > fgn->len)
			error = EINVAL;
		else
			error = copyout(p, fgn->buf, i);
		td->td_fpop = fpop;
		dev_relthread(dev);
		return (error);
	}
	error = dsw->d_ioctl(dev, com, data, fp->f_flag, td);
	td->td_fpop = NULL;
	dev_relthread(dev);
	if (error == ENOIOCTL)
		error = ENOTTY;
	if (error == 0 && com == TIOCSCTTY) {
		vp = fp->f_vnode;

		/* Do nothing if reassigning same control tty */
		sx_slock(&proctree_lock);
		if (td->td_proc->p_session->s_ttyvp == vp) {
			sx_sunlock(&proctree_lock);
			return (0);
		}

		vpold = td->td_proc->p_session->s_ttyvp;
		VREF(vp);
		SESS_LOCK(td->td_proc->p_session);
		td->td_proc->p_session->s_ttyvp = vp;
		SESS_UNLOCK(td->td_proc->p_session);

		sx_sunlock(&proctree_lock);

		/* Get rid of reference to old control tty */
		if (vpold)
			vrele(vpold);
	}
	return (error);
}
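
/*
 * Sketch (not part of the original file): the FIODGNAME case above lets a
 * userland process recover the device name behind an open devfs descriptor.
 * Assuming "fd" is such a descriptor, a caller could do:
 *
 *	char name[SPECNAMELEN + 1];
 *	struct fiodgname_arg fgn;
 *
 *	fgn.len = sizeof(name);
 *	fgn.buf = name;
 *	if (ioctl(fd, FIODGNAME, &fgn) == 0)
 *		printf("device name: %s\n", name);
 *
 * devfs_ioctl_f() copies out at most fgn.len bytes, including the
 * terminating NUL, and fails with EINVAL if the buffer is too small.
 */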

/* ARGSUSED */
static int
devfs_kqfilter_f(struct file *fp, struct knote *kn)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	int error;
	struct file *fpop;
	struct thread *td;

	td = curthread;
	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	error = dsw->d_kqfilter(dev, kn);
	td->td_fpop = fpop;
	dev_relthread(dev);
	return (error);
}

static int
devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
{
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
	struct thread *td;
	struct devfs_dirent *de, *dd;
	struct devfs_dirent **dde;
	struct devfs_mount *dmp;
	struct cdev *cdev;
	int error, flags, nameiop;
	char specname[SPECNAMELEN + 1], *pname;

	cnp = ap->a_cnp;
	vpp = ap->a_vpp;
	dvp = ap->a_dvp;
	pname = cnp->cn_nameptr;
	td = cnp->cn_thread;
	flags = cnp->cn_flags;
	nameiop = cnp->cn_nameiop;
	dmp = VFSTODEVFS(dvp->v_mount);
	dd = dvp->v_data;
	*vpp = NULLVP;

	if ((flags & ISLASTCN) && nameiop == RENAME)
		return (EOPNOTSUPP);

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
		return (EIO);

	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
	if (error)
		return (error);

	if (cnp->cn_namelen == 1 && *pname == '.') {
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		*vpp = dvp;
		VREF(dvp);
		return (0);
	}

	if (flags & ISDOTDOT) {
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		VOP_UNLOCK(dvp, 0);
		de = TAILQ_FIRST(&dd->de_dlist);	/* "." */
		de = TAILQ_NEXT(de, de_list);		/* ".." */
		de = de->de_dir;
		error = devfs_allocv(de, dvp->v_mount, vpp, td);
		*dm_unlock = 0;
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		return (error);
	}

	DEVFS_DMP_HOLD(dmp);
	devfs_populate(dmp);
	if (DEVFS_DMP_DROP(dmp)) {
		*dm_unlock = 0;
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
		return (ENOENT);
	}
	dd = dvp->v_data;
	de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen);
	while (de == NULL) {	/* While(...) so we can use break */

		if (nameiop == DELETE)
			return (ENOENT);

		/*
		 * OK, we didn't have an entry for the name we were asked for
		 * so we try to see if anybody can create it on demand.
		 */
		pname = devfs_fqpn(specname, dvp, cnp);
		if (pname == NULL)
			break;

		cdev = NULL;
		DEVFS_DMP_HOLD(dmp);
		sx_xunlock(&dmp->dm_lock);
		sx_slock(&clone_drain_lock);
		EVENTHANDLER_INVOKE(dev_clone,
		    td->td_ucred, pname, strlen(pname), &cdev);
		sx_sunlock(&clone_drain_lock);
		sx_xlock(&dmp->dm_lock);
		if (DEVFS_DMP_DROP(dmp)) {
			*dm_unlock = 0;
			sx_xunlock(&dmp->dm_lock);
			devfs_unmount_final(dmp);
			return (ENOENT);
		}
		if (cdev == NULL)
			break;

		DEVFS_DMP_HOLD(dmp);
		devfs_populate(dmp);
		if (DEVFS_DMP_DROP(dmp)) {
			*dm_unlock = 0;
			sx_xunlock(&dmp->dm_lock);
			devfs_unmount_final(dmp);
			return (ENOENT);
		}

		dev_lock();
		dde = &cdev2priv(cdev)->cdp_dirents[dmp->dm_idx];
		if (dde != NULL && *dde != NULL)
			de = *dde;
		dev_unlock();
		dev_rel(cdev);
		break;
	}

	if (de == NULL || de->de_flags & DE_WHITEOUT) {
		if ((nameiop == CREATE || nameiop == RENAME) &&
		    (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) {
			cnp->cn_flags |= SAVENAME;
			return (EJUSTRETURN);
		}
		return (ENOENT);
	}

	if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) {
		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
		if (error)
			return (error);
		if (*vpp == dvp) {
			VREF(dvp);
			*vpp = dvp;
			return (0);
		}
	}
	error = devfs_allocv(de, dvp->v_mount, vpp, td);
	*dm_unlock = 0;
	return (error);
}

static int
devfs_lookup(struct vop_lookup_args *ap)
{
	int j;
	struct devfs_mount *dmp;
	int dm_unlock;

	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
	dm_unlock = 1;
	sx_xlock(&dmp->dm_lock);
	j = devfs_lookupx(ap, &dm_unlock);
	if (dm_unlock == 1)
		sx_xunlock(&dmp->dm_lock);
	return (j);
}

static int
devfs_mknod(struct vop_mknod_args *ap)
{
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
	struct thread *td;
	struct devfs_dirent *dd, *de;
	struct devfs_mount *dmp;
	int error;

	/*
	 * The only type of node we should be creating here is a
	 * character device, for anything else return EOPNOTSUPP.
	 */
	if (ap->a_vap->va_type != VCHR)
		return (EOPNOTSUPP);
	dvp = ap->a_dvp;
	dmp = VFSTODEVFS(dvp->v_mount);

	cnp = ap->a_cnp;
	vpp = ap->a_vpp;
	td = cnp->cn_thread;
	dd = dvp->v_data;

	error = ENOENT;
	sx_xlock(&dmp->dm_lock);
	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (cnp->cn_namelen != de->de_dirent->d_namlen)
			continue;
		if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name,
		    de->de_dirent->d_namlen) != 0)
			continue;
		if (de->de_flags & DE_WHITEOUT)
			break;
		goto notfound;
	}
	if (de == NULL)
		goto notfound;
	de->de_flags &= ~DE_WHITEOUT;
	error = devfs_allocv(de, dvp->v_mount, vpp, td);
	return (error);
notfound:
	sx_xunlock(&dmp->dm_lock);
	return (error);
}

/* ARGSUSED */
static int
devfs_open(struct vop_open_args *ap)
{
	struct thread *td = ap->a_td;
	struct vnode *vp = ap->a_vp;
	struct cdev *dev = vp->v_rdev;
	struct file *fp = ap->a_fp;
	int error;
	struct cdevsw *dsw;
	struct file *fpop;

	if (vp->v_type == VBLK)
		return (ENXIO);

	if (dev == NULL)
		return (ENXIO);

	/* Make this field valid before any I/O in d_open. */
	if (dev->si_iosize_max == 0)
		dev->si_iosize_max = DFLTPHYS;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);

	/* XXX: Special casing of ttys for deadfs. Probably redundant. */
	if (dsw->d_flags & D_TTY)
		vp->v_vflag |= VV_ISTTY;

	VOP_UNLOCK(vp, 0);

	fpop = td->td_fpop;
	td->td_fpop = fp;
	if (fp != NULL)
		fp->f_data = dev;
	if (dsw->d_fdopen != NULL)
		error = dsw->d_fdopen(dev, ap->a_mode, td, fp);
	else
		error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
	td->td_fpop = fpop;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	dev_relthread(dev);

	if (error)
		return (error);

#if 0	/* /dev/console */
	KASSERT(fp != NULL,
	    ("Could not vnode bypass device on NULL fp"));
#else
	if (fp == NULL)
		return (error);
#endif
	if (fp->f_ops == &badfileops)
		finit(fp, fp->f_flag, DTYPE_VNODE, dev, &devfs_ops_f);
	return (error);
}

static int
devfs_pathconf(struct vop_pathconf_args *ap)
{

	switch (ap->a_name) {
	case _PC_MAC_PRESENT:
#ifdef MAC
		/*
		 * If MAC is enabled, devfs automatically supports
		 * trivial non-persistent label storage.
		 */
		*ap->a_retval = 1;
#else
		*ap->a_retval = 0;
#endif
		return (0);
	default:
		return (vop_stdpathconf(ap));
	}
	/* NOTREACHED */
}

/* ARGSUSED */
static int
devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	int error;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	error = dsw->d_poll(dev, events, td);
	td->td_fpop = fpop;
	dev_relthread(dev);
	return (error);
}

/*
 * Print out the contents of a special device vnode.
 */
static int
devfs_print(struct vop_print_args *ap)
{

	printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev));
	return (0);
}

/* ARGSUSED */
static int
devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
{
	struct cdev *dev;
	int ioflag, error, resid;
	struct cdevsw *dsw;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	resid = uio->uio_resid;
	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT);
	if (ioflag & O_DIRECT)
		ioflag |= IO_DIRECT;

	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	error = dsw->d_read(dev, uio, ioflag);
	if (uio->uio_resid != resid || (error == 0 && resid != 0))
		vfs_timestamp(&dev->si_atime);
	td->td_fpop = fpop;
	dev_relthread(dev);

	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	return (error);
}

static int
devfs_readdir(struct vop_readdir_args *ap)
{
	int error;
	struct uio *uio;
	struct dirent *dp;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp;
	off_t off, oldoff;
	int *tmp_ncookies = NULL;

	if (ap->a_vp->v_type != VDIR)
		return (ENOTDIR);

	uio = ap->a_uio;
	if (uio->uio_offset < 0)
		return (EINVAL);

	/*
	 * XXX: This is a temporary hack to get around this filesystem not
	 * supporting cookies. We store the location of the ncookies pointer
	 * in a temporary variable before calling vfs_subr.c:vfs_read_dirent()
	 * and set the number of cookies to 0. We then set the pointer to
	 * NULL so that vfs_read_dirent doesn't try to call realloc() on
	 * ap->a_cookies. Later in this function, we restore the ap->a_ncookies
	 * pointer to its original location before returning to the caller.
	 */
	if (ap->a_ncookies != NULL) {
		tmp_ncookies = ap->a_ncookies;
		*ap->a_ncookies = 0;
		ap->a_ncookies = NULL;
	}

	dmp = VFSTODEVFS(ap->a_vp->v_mount);
	sx_xlock(&dmp->dm_lock);
	DEVFS_DMP_HOLD(dmp);
	devfs_populate(dmp);
	if (DEVFS_DMP_DROP(dmp)) {
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
		if (tmp_ncookies != NULL)
			ap->a_ncookies = tmp_ncookies;
		return (EIO);
	}
	error = 0;
	de = ap->a_vp->v_data;
	off = 0;
	oldoff = uio->uio_offset;
	TAILQ_FOREACH(dd, &de->de_dlist, de_list) {
		KASSERT(dd->de_cdp != (void *)0xdeadc0de, ("%s %d\n", __func__, __LINE__));
		if (dd->de_flags & DE_WHITEOUT)
			continue;
		if (dd->de_dirent->d_type == DT_DIR)
			de = dd->de_dir;
		else
			de = dd;
		dp = dd->de_dirent;
		if (dp->d_reclen > uio->uio_resid)
			break;
		dp->d_fileno = de->de_inode;
		if (off >= uio->uio_offset) {
			error = vfs_read_dirent(ap, dp, off);
			if (error)
				break;
		}
		off += dp->d_reclen;
	}
	sx_xunlock(&dmp->dm_lock);
	uio->uio_offset = off;

	/*
	 * Restore ap->a_ncookies if it wasn't originally NULL in the first
	 * place.
	 */
	if (tmp_ncookies != NULL)
		ap->a_ncookies = tmp_ncookies;

	return (error);
}

static int
devfs_readlink(struct vop_readlink_args *ap)
{
	struct devfs_dirent *de;

	de = ap->a_vp->v_data;
	return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio));
}

static int
devfs_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *de;
	struct cdev *dev;

	mtx_lock(&devfs_de_interlock);
	de = vp->v_data;
	if (de != NULL) {
		de->de_vnode = NULL;
		vp->v_data = NULL;
	}
	mtx_unlock(&devfs_de_interlock);

	vnode_destroy_vobject(vp);

	VI_LOCK(vp);
	dev_lock();
	dev = vp->v_rdev;
	vp->v_rdev = NULL;

	if (dev == NULL) {
		dev_unlock();
		VI_UNLOCK(vp);
		return (0);
	}

	dev->si_usecount -= vp->v_usecount;
	dev_unlock();
	VI_UNLOCK(vp);
	dev_rel(dev);
	return (0);
}

static int
devfs_remove(struct vop_remove_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount);

	sx_xlock(&dmp->dm_lock);
	dd = ap->a_dvp->v_data;
	de = vp->v_data;
	if (de->de_cdp == NULL) {
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		devfs_delete(dmp, de, 1);
	} else {
		de->de_flags |= DE_WHITEOUT;
	}
	sx_xunlock(&dmp->dm_lock);
	return (0);
}

/*
 * Revoke is called on a tty when a terminal session ends. The vnode
 * is orphaned by setting v_op to deadfs so we need to let go of it
 * as well so that we create a new one next time around.
 *
 */
static int
devfs_revoke(struct vop_revoke_args *ap)
{
	struct vnode *vp = ap->a_vp, *vp2;
	struct cdev *dev;
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	int i;

	KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));

	dev = vp->v_rdev;
	cdp = cdev2priv(dev);

	dev_lock();
	cdp->cdp_inuse++;
	dev_unlock();

	vhold(vp);
	vgone(vp);
	vdrop(vp);

	VOP_UNLOCK(vp,0);
loop:
	for (;;) {
		mtx_lock(&devfs_de_interlock);
		dev_lock();
		vp2 = NULL;
		for (i = 0; i <= cdp->cdp_maxdirent; i++) {
			de = cdp->cdp_dirents[i];
			if (de == NULL)
				continue;

			vp2 = de->de_vnode;
			if (vp2 != NULL) {
				dev_unlock();
				VI_LOCK(vp2);
				mtx_unlock(&devfs_de_interlock);
				if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
				    curthread))
					goto loop;
				vhold(vp2);
				vgone(vp2);
				vdrop(vp2);
				vput(vp2);
				break;
			}
		}
		if (vp2 != NULL) {
			continue;
		}
		dev_unlock();
		mtx_unlock(&devfs_de_interlock);
		break;
	}
	dev_lock();
	cdp->cdp_inuse--;
	if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) {
		TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
		dev_unlock();
		dev_rel(&cdp->cdp_c);
	} else
		dev_unlock();

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	return (0);
}

static int
devfs_rioctl(struct vop_ioctl_args *ap)
{
	int error;
	struct devfs_mount *dmp;

	dmp = VFSTODEVFS(ap->a_vp->v_mount);
	sx_xlock(&dmp->dm_lock);
	DEVFS_DMP_HOLD(dmp);
	devfs_populate(dmp);
	if (DEVFS_DMP_DROP(dmp)) {
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
		return (ENOENT);
	}
	error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td);
	sx_xunlock(&dmp->dm_lock);
	return (error);
}

static int
devfs_rread(struct vop_read_args *ap)
{

	if (ap->a_vp->v_type != VDIR)
		return (EINVAL);
	return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL));
}

static int
devfs_setattr(struct vop_setattr_args *ap)
{
	struct devfs_dirent *de;
	struct vattr *vap;
	struct vnode *vp;
	struct thread *td;
	int c, error;
	uid_t uid;
	gid_t gid;

	vap = ap->a_vap;
	vp = ap->a_vp;
	td = curthread;
	if ((vap->va_type != VNON) ||
	    (vap->va_nlink != VNOVAL) ||
	    (vap->va_fsid != VNOVAL) ||
	    (vap->va_fileid != VNOVAL) ||
	    (vap->va_blocksize != VNOVAL) ||
	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
	    (vap->va_rdev != VNOVAL) ||
	    ((int)vap->va_bytes != VNOVAL) ||
	    (vap->va_gen != VNOVAL)) {
		return (EINVAL);
	}

	de = vp->v_data;
	if (vp->v_type == VDIR)
		de = de->de_dir;

	error = c = 0;
	if (vap->va_uid == (uid_t)VNOVAL)
		uid = de->de_uid;
	else
		uid = vap->va_uid;
	if (vap->va_gid == (gid_t)VNOVAL)
		gid = de->de_gid;
	else
		gid = vap->va_gid;
	if (uid != de->de_uid || gid != de->de_gid) {
		if ((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid ||
		    (gid != de->de_gid && !groupmember(gid, ap->a_cred))) {
			error = priv_check(td, PRIV_VFS_CHOWN);
			if (error)
				return (error);
		}
		de->de_uid = uid;
		de->de_gid = gid;
		c = 1;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		if (ap->a_cred->cr_uid != de->de_uid) {
			error = priv_check(td, PRIV_VFS_ADMIN);
			if (error)
				return (error);
		}
		de->de_mode = vap->va_mode;
		c = 1;
	}

	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
		/* See the comment in ufs_vnops::ufs_setattr(). */
		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)) &&
		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
		    (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, td))))
			return (error);
		if (vap->va_atime.tv_sec != VNOVAL) {
			if (vp->v_type == VCHR)
				vp->v_rdev->si_atime = vap->va_atime;
			else
				de->de_atime = vap->va_atime;
		}
		if (vap->va_mtime.tv_sec != VNOVAL) {
			if (vp->v_type == VCHR)
				vp->v_rdev->si_mtime = vap->va_mtime;
			else
				de->de_mtime = vap->va_mtime;
		}
		c = 1;
	}

	if (c) {
		if (vp->v_type == VCHR)
			vfs_timestamp(&vp->v_rdev->si_ctime);
		else
			vfs_timestamp(&de->de_mtime);
	}
	return (0);
}

#ifdef MAC
static int
devfs_setlabel(struct vop_setlabel_args *ap)
{
	struct vnode *vp;
	struct devfs_dirent *de;

	vp = ap->a_vp;
	de = vp->v_data;

	mac_vnode_relabel(ap->a_cred, vp, ap->a_label);
	mac_devfs_update(vp->v_mount, de, vp);

	return (0);
}
#endif

static int
devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred, struct thread *td)
{

	return (vnops.fo_stat(fp, sb, cred, td));
}

static int
devfs_symlink(struct vop_symlink_args *ap)
{
	int i, error;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp;
	struct thread *td;

	td = ap->a_cnp->cn_thread;
	KASSERT(td == curthread, ("devfs_symlink: td != curthread"));

	error = priv_check(td, PRIV_DEVFS_SYMLINK);
	if (error)
		return (error);
	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
	dd = ap->a_dvp->v_data;
	de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen);
	de->de_uid = 0;
	de->de_gid = 0;
	de->de_mode = 0755;
	de->de_inode = alloc_unr(devfs_inos);
	de->de_dirent->d_type = DT_LNK;
	i = strlen(ap->a_target) + 1;
	de->de_symlink = malloc(i, M_DEVFS, M_WAITOK);
	bcopy(ap->a_target, de->de_symlink, i);
	sx_xlock(&dmp->dm_lock);
#ifdef MAC
	mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de);
#endif
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	return (devfs_allocv(de, ap->a_dvp->v_mount, ap->a_vpp, td));
}

static int
devfs_truncate_f(struct file *fp, off_t length, struct ucred *cred, struct thread *td)
{

	return (vnops.fo_truncate(fp, length, cred, td));
}

/* ARGSUSED */
static int
devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
{
	struct cdev *dev;
	int error, ioflag, resid;
	struct cdevsw *dsw;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC);
	if (ioflag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	resid = uio->uio_resid;

	error = dsw->d_write(dev, uio, ioflag);
	if (uio->uio_resid != resid || (error == 0 && resid != 0)) {
		vfs_timestamp(&dev->si_ctime);
		dev->si_mtime = dev->si_ctime;
	}
	td->td_fpop = fpop;
	dev_relthread(dev);

	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	return (error);
}

dev_t
dev2udev(struct cdev *x)
{
	if (x == NULL)
		return (NODEV);
	return (cdev2priv(x)->cdp_inode);
}

static struct fileops devfs_ops_f = {
	.fo_read =	devfs_read_f,
	.fo_write =	devfs_write_f,
	.fo_truncate =	devfs_truncate_f,
	.fo_ioctl =	devfs_ioctl_f,
	.fo_poll =	devfs_poll_f,
	.fo_kqfilter =	devfs_kqfilter_f,
	.fo_stat =	devfs_stat_f,
	.fo_close =	devfs_close_f,
	.fo_flags =	DFLAG_PASSABLE | DFLAG_SEEKABLE
};

static struct vop_vector devfs_vnodeops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		devfs_access,
	.vop_getattr =		devfs_getattr,
	.vop_ioctl =		devfs_rioctl,
	.vop_lookup =		devfs_lookup,
	.vop_mknod =		devfs_mknod,
	.vop_pathconf =		devfs_pathconf,
	.vop_read =		devfs_rread,
	.vop_readdir =		devfs_readdir,
	.vop_readlink =		devfs_readlink,
	.vop_reclaim =		devfs_reclaim,
	.vop_remove =		devfs_remove,
	.vop_revoke =		devfs_revoke,
	.vop_setattr =		devfs_setattr,
#ifdef MAC
	.vop_setlabel =		devfs_setlabel,
#endif
	.vop_symlink =		devfs_symlink,
	.vop_vptocnp =		devfs_vptocnp,
};

static struct vop_vector devfs_specops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		devfs_access,
	.vop_advlock =		devfs_advlock,
	.vop_bmap =		VOP_PANIC,
	.vop_close =		devfs_close,
	.vop_create =		VOP_PANIC,
	.vop_fsync =		devfs_fsync,
	.vop_getattr =		devfs_getattr,
	.vop_lease =		VOP_NULL,
	.vop_link =		VOP_PANIC,
	.vop_mkdir =		VOP_PANIC,
	.vop_mknod =		VOP_PANIC,
	.vop_open =		devfs_open,
	.vop_pathconf =		devfs_pathconf,
	.vop_print =		devfs_print,
	.vop_read =		VOP_PANIC,
	.vop_readdir =		VOP_PANIC,
	.vop_readlink =		VOP_PANIC,
	.vop_reallocblks =	VOP_PANIC,
	.vop_reclaim =		devfs_reclaim,
	.vop_remove =		devfs_remove,
	.vop_rename =		VOP_PANIC,
	.vop_revoke =		devfs_revoke,
	.vop_rmdir =		VOP_PANIC,
	.vop_setattr =		devfs_setattr,
#ifdef MAC
	.vop_setlabel =		devfs_setlabel,
#endif
	.vop_strategy =		VOP_PANIC,
	.vop_symlink =		VOP_PANIC,
	.vop_vptocnp =		devfs_vptocnp,
	.vop_write =		VOP_PANIC,
};

/*
 * Our calling convention to the device drivers used to be that we passed
 * vnode.h IO_* flags to read()/write(), but we're moving to fcntl.h O_
 * flags instead since that's what open(), close() and ioctl() take and
 * we don't really want vnode.h in device drivers.
 * We solved the source compatibility by redefining some vnode flags to
 * be the same as the fcntl ones and by sending down the bitwise OR of
 * the respective fcntl/vnode flags. These CTASSERTS make sure nobody
 * pulls the rug out under this.
 */
CTASSERT(O_NONBLOCK == IO_NDELAY);
CTASSERT(O_FSYNC == IO_SYNC);