/*-
 * Copyright (c) 2000-2004
 *	Poul-Henning Kamp.  All rights reserved.
 * Copyright (c) 1989, 1992-1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kernfs_vnops.c	8.15 (Berkeley) 5/21/95
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43
 *
 * $FreeBSD$
 */

/*
 * TODO:
 *	remove empty directories
 *	mkdir: want it ?
 */

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/ttycom.h>
#include <sys/unistd.h>
#include <sys/vnode.h>

static struct vop_vector devfs_vnodeops;
static struct vop_vector devfs_specops;
static struct fileops devfs_ops_f;

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

#include <security/mac/mac_framework.h>

static MALLOC_DEFINE(M_CDEVPDATA, "DEVFSP", "Metainfo for cdev-fp data");

struct mtx	devfs_de_interlock;
MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF);
struct sx	clone_drain_lock;
SX_SYSINIT(clone_drain_lock, &clone_drain_lock, "clone events drain lock");
struct mtx	cdevpriv_mtx;
MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF);

static int
devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp)
{

	*dswp = devvn_refthread(fp->f_vnode, devp);
	if (*devp != fp->f_data) {
		if (*dswp != NULL)
			dev_relthread(*devp);
		return (ENXIO);
	}
	KASSERT((*devp)->si_refcount > 0,
	    ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp)));
	if (*dswp == NULL)
		return (ENXIO);
	curthread->td_fpop = fp;
	return (0);
}

int
devfs_get_cdevpriv(void **datap)
{
	struct file *fp;
	struct cdev_privdata *p;
	int error;

	fp = curthread->td_fpop;
	if (fp == NULL)
		return (EBADF);
	p = fp->f_cdevpriv;
	if (p != NULL) {
		error = 0;
		*datap = p->cdpd_data;
	} else
		error = ENOENT;
	return (error);
}

int
devfs_set_cdevpriv(void *priv, cdevpriv_dtr_t priv_dtr)
{
	struct file *fp;
	struct cdev_priv *cdp;
	struct cdev_privdata *p;
	int error;

	fp = curthread->td_fpop;
	if (fp == NULL)
		return (ENOENT);
	cdp = cdev2priv((struct cdev *)fp->f_data);
	p = malloc(sizeof(struct cdev_privdata), M_CDEVPDATA, M_WAITOK);
	p->cdpd_data = priv;
	p->cdpd_dtr = priv_dtr;
	p->cdpd_fp = fp;
	mtx_lock(&cdevpriv_mtx);
	if (fp->f_cdevpriv == NULL) {
		LIST_INSERT_HEAD(&cdp->cdp_fdpriv, p, cdpd_list);
		fp->f_cdevpriv = p;
		mtx_unlock(&cdevpriv_mtx);
		error = 0;
	} else {
		mtx_unlock(&cdevpriv_mtx);
		free(p, M_CDEVPDATA);
		error = EBUSY;
	}
	return (error);
}

void
devfs_destroy_cdevpriv(struct cdev_privdata *p)
{

	mtx_assert(&cdevpriv_mtx, MA_OWNED);
	p->cdpd_fp->f_cdevpriv = NULL;
	LIST_REMOVE(p, cdpd_list);
	mtx_unlock(&cdevpriv_mtx);
	(p->cdpd_dtr)(p->cdpd_data);
	free(p, M_CDEVPDATA);
}

void
devfs_fpdrop(struct file *fp)
{
	struct cdev_privdata *p;

	mtx_lock(&cdevpriv_mtx);
	if ((p = fp->f_cdevpriv) == NULL) {
		mtx_unlock(&cdevpriv_mtx);
		return;
	}
	devfs_destroy_cdevpriv(p);
}

void
devfs_clear_cdevpriv(void)
{
	struct file *fp;

	fp = curthread->td_fpop;
	if (fp == NULL)
		return;
	devfs_fpdrop(fp);
}
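
/*
 * Illustrative sketch (not part of this file): the cdevpriv calls above
 * give a driver per-file-descriptor state.  A typical consumer attaches
 * state from a d_fdopen method and reads it back in its I/O methods; the
 * destructor runs when the descriptor is finally closed.  The names
 * "foo_softc", "foo_dtr", "foo_fdopen" and "foo_read" are hypothetical,
 * and the M_TEMP malloc type is used only for brevity.
 *
 *	static void
 *	foo_dtr(void *data)
 *	{
 *
 *		free(data, M_TEMP);
 *	}
 *
 *	static int
 *	foo_fdopen(struct cdev *dev, int oflags, struct thread *td,
 *	    struct file *fp)
 *	{
 *		struct foo_softc *sc;
 *
 *		sc = malloc(sizeof(*sc), M_TEMP, M_WAITOK | M_ZERO);
 *		return (devfs_set_cdevpriv(sc, foo_dtr));
 *	}
 *
 *	static int
 *	foo_read(struct cdev *dev, struct uio *uio, int ioflag)
 *	{
 *		struct foo_softc *sc;
 *		int error;
 *
 *		error = devfs_get_cdevpriv((void **)&sc);
 *		if (error != 0)
 *			return (error);
 *		...
 *	}
 */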

static int
devfs_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct devfs_mount *dmp;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	struct devfs_dirent *dd, *de;
	int i, error;

	dmp = VFSTODEVFS(vp->v_mount);
	i = *buflen;
	dd = vp->v_data;
	error = 0;

	sx_xlock(&dmp->dm_lock);

	if (vp->v_type == VCHR) {
		i -= strlen(dd->de_cdp->cdp_c.si_name);
		if (i < 0) {
			error = ENOMEM;
			goto finished;
		}
		bcopy(dd->de_cdp->cdp_c.si_name, buf + i,
		    strlen(dd->de_cdp->cdp_c.si_name));
		de = dd->de_dir;
	} else if (vp->v_type == VDIR) {
		if (dd == dmp->dm_rootdir) {
			*dvp = vp;
			vhold(*dvp);
			goto finished;
		}
		i -= dd->de_dirent->d_namlen;
		if (i < 0) {
			error = ENOMEM;
			goto finished;
		}
		bcopy(dd->de_dirent->d_name, buf + i,
		    dd->de_dirent->d_namlen);
		de = dd;
	} else {
		error = ENOENT;
		goto finished;
	}
	*buflen = i;
	de = TAILQ_FIRST(&de->de_dlist);	/* "." */
	de = TAILQ_NEXT(de, de_list);		/* ".." */
	de = de->de_dir;
	mtx_lock(&devfs_de_interlock);
	*dvp = de->de_vnode;
	if (*dvp != NULL) {
		VI_LOCK(*dvp);
		mtx_unlock(&devfs_de_interlock);
		vholdl(*dvp);
		VI_UNLOCK(*dvp);
	} else {
		mtx_unlock(&devfs_de_interlock);
		error = ENOENT;
	}
finished:
	sx_xunlock(&dmp->dm_lock);
	return (error);
}

/*
 * Construct the fully qualified path name relative to the mountpoint.
 */
static char *
devfs_fqpn(char *buf, struct vnode *dvp, struct componentname *cnp)
{
	int i;
	struct devfs_dirent *de, *dd;
	struct devfs_mount *dmp;

	dmp = VFSTODEVFS(dvp->v_mount);
	dd = dvp->v_data;
	i = SPECNAMELEN;
	buf[i] = '\0';
	i -= cnp->cn_namelen;
	if (i < 0)
		return (NULL);
	bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen);
	de = dd;
	while (de != dmp->dm_rootdir) {
		i--;
		if (i < 0)
			return (NULL);
		buf[i] = '/';
		i -= de->de_dirent->d_namlen;
		if (i < 0)
			return (NULL);
		bcopy(de->de_dirent->d_name, buf + i,
		    de->de_dirent->d_namlen);
		de = TAILQ_FIRST(&de->de_dlist);	/* "." */
		de = TAILQ_NEXT(de, de_list);		/* ".." */
		de = de->de_dir;
	}
	return (buf + i);
}

static int
devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp,
    struct devfs_dirent *de)
{
	int not_found;

	not_found = 0;
	if (de->de_flags & DE_DOOMED)
		not_found = 1;
	if (DEVFS_DE_DROP(de)) {
		KASSERT(not_found == 1, ("DEVFS de dropped but not doomed"));
		devfs_dirent_free(de);
	}
	if (DEVFS_DMP_DROP(dmp)) {
		KASSERT(not_found == 1,
		    ("DEVFS mount struct freed before dirent"));
		not_found = 2;
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
	}
	if (not_found == 1 || (drop_dm_lock && not_found != 2))
		sx_unlock(&dmp->dm_lock);
	return (not_found);
}

static void
devfs_insmntque_dtr(struct vnode *vp, void *arg)
{
	struct devfs_dirent *de;

	de = (struct devfs_dirent *)arg;
	mtx_lock(&devfs_de_interlock);
	vp->v_data = NULL;
	de->de_vnode = NULL;
	mtx_unlock(&devfs_de_interlock);
	vgone(vp);
	vput(vp);
}

/*
 * devfs_allocv shall be entered with dmp->dm_lock held, and it drops
 * it on return.
 */
int
devfs_allocv(struct devfs_dirent *de, struct mount *mp, struct vnode **vpp)
{
	int error;
	struct vnode *vp;
	struct cdev *dev;
	struct devfs_mount *dmp;

	dmp = VFSTODEVFS(mp);
	if (de->de_flags & DE_DOOMED) {
		sx_xunlock(&dmp->dm_lock);
		return (ENOENT);
	}
	DEVFS_DE_HOLD(de);
	DEVFS_DMP_HOLD(dmp);
	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		VI_LOCK(vp);
		mtx_unlock(&devfs_de_interlock);
		sx_xunlock(&dmp->dm_lock);
		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread);
		sx_xlock(&dmp->dm_lock);
		if (devfs_allocv_drop_refs(0, dmp, de)) {
			if (error == 0)
				vput(vp);
			return (ENOENT);
		} else if (error) {
			sx_xunlock(&dmp->dm_lock);
			return (error);
		}
		sx_xunlock(&dmp->dm_lock);
		*vpp = vp;
		return (0);
	}
	mtx_unlock(&devfs_de_interlock);
	if (de->de_dirent->d_type == DT_CHR) {
		if (!(de->de_cdp->cdp_flags & CDP_ACTIVE)) {
			devfs_allocv_drop_refs(1, dmp, de);
			return (ENOENT);
		}
		dev = &de->de_cdp->cdp_c;
	} else {
		dev = NULL;
	}
	error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp);
	if (error != 0) {
		devfs_allocv_drop_refs(1, dmp, de);
		printf("devfs_allocv: failed to allocate new vnode\n");
		return (error);
	}

	if (de->de_dirent->d_type == DT_CHR) {
		vp->v_type = VCHR;
		VI_LOCK(vp);
		dev_lock();
		dev_refl(dev);
		/* XXX: v_rdev should be protected by the vnode lock. */
		vp->v_rdev = dev;
		KASSERT(vp->v_usecount == 1,
		    ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount));
		dev->si_usecount += vp->v_usecount;
		dev_unlock();
		VI_UNLOCK(vp);
		vp->v_op = &devfs_specops;
	} else if (de->de_dirent->d_type == DT_DIR) {
		vp->v_type = VDIR;
	} else if (de->de_dirent->d_type == DT_LNK) {
		vp->v_type = VLNK;
	} else {
		vp->v_type = VBAD;
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
	mtx_lock(&devfs_de_interlock);
	vp->v_data = de;
	de->de_vnode = vp;
	mtx_unlock(&devfs_de_interlock);
	error = insmntque1(vp, mp, devfs_insmntque_dtr, de);
	if (error != 0) {
		(void) devfs_allocv_drop_refs(1, dmp, de);
		return (error);
	}
	if (devfs_allocv_drop_refs(0, dmp, de)) {
		vput(vp);
		return (ENOENT);
	}
#ifdef MAC
	mac_devfs_vnode_associate(mp, de, vp);
#endif
	sx_xunlock(&dmp->dm_lock);
	*vpp = vp;
	return (0);
}

static int
devfs_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *de;
	int error;

	de = vp->v_data;
	if (vp->v_type == VDIR)
		de = de->de_dir;

	error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid,
	    ap->a_accmode, ap->a_cred, NULL);
	if (!error)
		return (error);
	if (error != EACCES)
		return (error);
	/* We do, however, allow access to the controlling terminal. */
	if (!(ap->a_td->td_proc->p_flag & P_CONTROLT))
		return (error);
	if (ap->a_td->td_proc->p_session->s_ttyvp == de->de_vnode)
		return (0);
	return (error);
}

/* ARGSUSED */
static int
devfs_close(struct vop_close_args *ap)
{
	struct vnode *vp = ap->a_vp, *oldvp;
	struct thread *td = ap->a_td;
	struct cdev *dev = vp->v_rdev;
	struct cdevsw *dsw;
	int vp_locked, error;

	/*
	 * Hack: a tty device that is a controlling terminal
	 * has a reference from the session structure.  We cannot
	 * easily tell that a character device is a controlling
	 * terminal, unless it is the closing process' controlling
	 * terminal.  In that case, if the reference count is 2
	 * (this last descriptor plus the session), release the
	 * reference from the session.
	 */
	oldvp = NULL;
	sx_xlock(&proctree_lock);
	if (td && vp == td->td_proc->p_session->s_ttyvp) {
		SESS_LOCK(td->td_proc->p_session);
		VI_LOCK(vp);
		if (count_dev(dev) == 2 && (vp->v_iflag & VI_DOOMED) == 0) {
			td->td_proc->p_session->s_ttyvp = NULL;
			oldvp = vp;
		}
		VI_UNLOCK(vp);
		SESS_UNLOCK(td->td_proc->p_session);
	}
	sx_xunlock(&proctree_lock);
	if (oldvp != NULL)
		vrele(oldvp);
	/*
	 * We do not want to really close the device if it
	 * is still in use unless we are trying to close it
	 * forcibly.  Since every use (buffer, vnode, swap, cmap)
	 * holds a reference to the vnode, and because we mark
	 * any other vnodes that alias this device, when the
	 * sum of the reference counts on all the aliased
	 * vnodes descends to one, we are on last close.
	 */
	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		/* Forced close. */
	} else if (dsw->d_flags & D_TRACKCLOSE) {
		/* Keep device updated on status. */
	} else if (count_dev(dev) > 1) {
		VI_UNLOCK(vp);
		dev_relthread(dev);
		return (0);
	}
	vholdl(vp);
	VI_UNLOCK(vp);
	vp_locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	KASSERT(dev->si_refcount > 0,
	    ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
	error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td);
	dev_relthread(dev);
	vn_lock(vp, vp_locked | LK_RETRY);
	vdrop(vp);
	return (error);
}

static int
devfs_close_f(struct file *fp, struct thread *td)
{
	int error;
	struct file *fpop;

	fpop = td->td_fpop;
	td->td_fpop = fp;
	error = vnops.fo_close(fp, td);
	td->td_fpop = fpop;
	return (error);
}
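
/*
 * Illustrative sketch (not part of this file): the "last close" logic in
 * devfs_close() above means a driver's d_close normally runs only when the
 * final reference to the device vnode goes away.  A driver that wants to
 * see every close(2) sets D_TRACKCLOSE in its cdevsw; "foo_cdevsw",
 * "foo_open" and "foo_close" are hypothetical names.
 *
 *	static struct cdevsw foo_cdevsw = {
 *		.d_version =	D_VERSION,
 *		.d_flags =	D_TRACKCLOSE,
 *		.d_open =	foo_open,
 *		.d_close =	foo_close,
 *		.d_name =	"foo",
 *	};
 */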

static int
devfs_fsync(struct vop_fsync_args *ap)
{
	int error;
	struct bufobj *bo;
	struct devfs_dirent *de;

	if (!vn_isdisk(ap->a_vp, &error)) {
		bo = &ap->a_vp->v_bufobj;
		de = ap->a_vp->v_data;
		if (error == ENXIO && bo->bo_dirty.bv_cnt > 0) {
			printf("Device %s went missing before all of the data "
			    "could be written to it; expect data loss.\n",
			    de->de_dirent->d_name);

			error = vop_stdfsync(ap);
			if (bo->bo_dirty.bv_cnt != 0 || error != 0)
				panic("devfs_fsync: vop_stdfsync failed.");
		}

		return (0);
	}

	return (vop_stdfsync(ap));
}

static int
devfs_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	int error = 0;
	struct devfs_dirent *de;
	struct cdev *dev;

	de = vp->v_data;
	KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp));
	if (vp->v_type == VDIR) {
		de = de->de_dir;
		KASSERT(de != NULL,
		    ("Null dir dirent in devfs_getattr vp=%p", vp));
	}
	vap->va_uid = de->de_uid;
	vap->va_gid = de->de_gid;
	vap->va_mode = de->de_mode;
	if (vp->v_type == VLNK)
		vap->va_size = strlen(de->de_symlink);
	else if (vp->v_type == VDIR)
		vap->va_size = vap->va_bytes = DEV_BSIZE;
	else
		vap->va_size = 0;
	if (vp->v_type != VDIR)
		vap->va_bytes = 0;
	vap->va_blocksize = DEV_BSIZE;
	vap->va_type = vp->v_type;

#define fix(aa)							\
	do {							\
		if ((aa).tv_sec <= 3600) {			\
			(aa).tv_sec = boottime.tv_sec;		\
			(aa).tv_nsec = boottime.tv_usec * 1000;	\
		}						\
	} while (0)

	if (vp->v_type != VCHR) {
		fix(de->de_atime);
		vap->va_atime = de->de_atime;
		fix(de->de_mtime);
		vap->va_mtime = de->de_mtime;
		fix(de->de_ctime);
		vap->va_ctime = de->de_ctime;
	} else {
		dev = vp->v_rdev;
		fix(dev->si_atime);
		vap->va_atime = dev->si_atime;
		fix(dev->si_mtime);
		vap->va_mtime = dev->si_mtime;
		fix(dev->si_ctime);
		vap->va_ctime = dev->si_ctime;

		vap->va_rdev = cdev2priv(dev)->cdp_inode;
	}
	vap->va_gen = 0;
	vap->va_flags = 0;
	vap->va_filerev = 0;
	vap->va_nlink = de->de_links;
	vap->va_fileid = de->de_inode;

	return (error);
}

/* ARGSUSED */
static int
devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struct thread *td)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	struct vnode *vp;
	struct vnode *vpold;
	int error, i;
	const char *p;
	struct fiodgname_arg *fgn;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);

	if (com == FIODTYPE) {
		*(int *)data = dsw->d_flags & D_TYPEMASK;
		td->td_fpop = fpop;
		dev_relthread(dev);
		return (0);
	} else if (com == FIODGNAME) {
		fgn = data;
		p = devtoname(dev);
		i = strlen(p) + 1;
		if (i > fgn->len)
			error = EINVAL;
		else
			error = copyout(p, fgn->buf, i);
		td->td_fpop = fpop;
		dev_relthread(dev);
		return (error);
	}
	error = dsw->d_ioctl(dev, com, data, fp->f_flag, td);
	td->td_fpop = NULL;
	dev_relthread(dev);
	if (error == ENOIOCTL)
		error = ENOTTY;
	if (error == 0 && com == TIOCSCTTY) {
		vp = fp->f_vnode;

		/* Do nothing if reassigning same control tty */
		sx_slock(&proctree_lock);
		if (td->td_proc->p_session->s_ttyvp == vp) {
			sx_sunlock(&proctree_lock);
			return (0);
		}

		vpold = td->td_proc->p_session->s_ttyvp;
		VREF(vp);
		SESS_LOCK(td->td_proc->p_session);
		td->td_proc->p_session->s_ttyvp = vp;
		SESS_UNLOCK(td->td_proc->p_session);

		sx_sunlock(&proctree_lock);

		/* Get rid of reference to old control tty */
		if (vpold)
			vrele(vpold);
	}
	return (error);
}
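
/*
 * Illustrative sketch (not part of this file): the FIODGNAME handling
 * above is what lets userland recover the devfs name behind a descriptor,
 * roughly as follows (error handling omitted; cf. fdevname(3)).
 *
 *	char buf[SPECNAMELEN + 1];
 *	struct fiodgname_arg fgn;
 *
 *	fgn.len = sizeof(buf);
 *	fgn.buf = buf;
 *	ioctl(fd, FIODGNAME, &fgn);
 */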

/* ARGSUSED */
static int
devfs_kqfilter_f(struct file *fp, struct knote *kn)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	int error;
	struct file *fpop;
	struct thread *td;

	td = curthread;
	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	error = dsw->d_kqfilter(dev, kn);
	td->td_fpop = fpop;
	dev_relthread(dev);
	return (error);
}

static int
devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
{
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
	struct thread *td;
	struct devfs_dirent *de, *dd;
	struct devfs_dirent **dde;
	struct devfs_mount *dmp;
	struct cdev *cdev;
	int error, flags, nameiop;
	char specname[SPECNAMELEN + 1], *pname;

	cnp = ap->a_cnp;
	vpp = ap->a_vpp;
	dvp = ap->a_dvp;
	pname = cnp->cn_nameptr;
	td = cnp->cn_thread;
	flags = cnp->cn_flags;
	nameiop = cnp->cn_nameiop;
	dmp = VFSTODEVFS(dvp->v_mount);
	dd = dvp->v_data;
	*vpp = NULLVP;

	if ((flags & ISLASTCN) && nameiop == RENAME)
		return (EOPNOTSUPP);

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
		return (EIO);

	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
	if (error)
		return (error);

	if (cnp->cn_namelen == 1 && *pname == '.') {
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		*vpp = dvp;
		VREF(dvp);
		return (0);
	}

	if (flags & ISDOTDOT) {
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		VOP_UNLOCK(dvp, 0);
		de = TAILQ_FIRST(&dd->de_dlist);	/* "." */
		de = TAILQ_NEXT(de, de_list);		/* ".." */
		de = de->de_dir;
		error = devfs_allocv(de, dvp->v_mount, vpp);
		*dm_unlock = 0;
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		return (error);
	}

	DEVFS_DMP_HOLD(dmp);
	devfs_populate(dmp);
	if (DEVFS_DMP_DROP(dmp)) {
		*dm_unlock = 0;
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
		return (ENOENT);
	}
	dd = dvp->v_data;
	de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen);
	while (de == NULL) {	/* While(...) so we can use break */

		if (nameiop == DELETE)
			return (ENOENT);

		/*
		 * OK, we didn't have an entry for the name we were asked for,
		 * so we try to see if anybody can create it on demand.
		 */
		pname = devfs_fqpn(specname, dvp, cnp);
		if (pname == NULL)
			break;

		cdev = NULL;
		DEVFS_DMP_HOLD(dmp);
		sx_xunlock(&dmp->dm_lock);
		sx_slock(&clone_drain_lock);
		EVENTHANDLER_INVOKE(dev_clone,
		    td->td_ucred, pname, strlen(pname), &cdev);
		sx_sunlock(&clone_drain_lock);
		sx_xlock(&dmp->dm_lock);
		if (DEVFS_DMP_DROP(dmp)) {
			*dm_unlock = 0;
			sx_xunlock(&dmp->dm_lock);
			devfs_unmount_final(dmp);
			return (ENOENT);
		}
		if (cdev == NULL)
			break;

		DEVFS_DMP_HOLD(dmp);
		devfs_populate(dmp);
		if (DEVFS_DMP_DROP(dmp)) {
			*dm_unlock = 0;
			sx_xunlock(&dmp->dm_lock);
			devfs_unmount_final(dmp);
			return (ENOENT);
		}

		dev_lock();
		dde = &cdev2priv(cdev)->cdp_dirents[dmp->dm_idx];
		if (dde != NULL && *dde != NULL)
			de = *dde;
		dev_unlock();
		dev_rel(cdev);
		break;
	}

	if (de == NULL || de->de_flags & DE_WHITEOUT) {
		if ((nameiop == CREATE || nameiop == RENAME) &&
		    (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) {
			cnp->cn_flags |= SAVENAME;
			return (EJUSTRETURN);
		}
		return (ENOENT);
	}

	if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) {
		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
		if (error)
			return (error);
		if (*vpp == dvp) {
			VREF(dvp);
			*vpp = dvp;
			return (0);
		}
	}
	error = devfs_allocv(de, dvp->v_mount, vpp);
	*dm_unlock = 0;
	return (error);
}

static int
devfs_lookup(struct vop_lookup_args *ap)
{
	int j;
	struct devfs_mount *dmp;
	int dm_unlock;

	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
	dm_unlock = 1;
	sx_xlock(&dmp->dm_lock);
	j = devfs_lookupx(ap, &dm_unlock);
	if (dm_unlock == 1)
		sx_xunlock(&dmp->dm_lock);
	return (j);
}
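
/*
 * Illustrative sketch (not part of this file): the dev_clone event invoked
 * from devfs_lookupx() above is how drivers create device nodes on demand.
 * A clone handler is registered roughly like this; "foo_clone", "foo_cdevsw"
 * and the "foo%d" naming are hypothetical.
 *
 *	static void
 *	foo_clone(void *arg, struct ucred *cred, char *name, int namelen,
 *	    struct cdev **dev)
 *	{
 *		int unit;
 *
 *		if (*dev != NULL)
 *			return;
 *		if (dev_stdclone(name, NULL, "foo", &unit) != 1)
 *			return;
 *		*dev = make_dev(&foo_cdevsw, unit, UID_ROOT, GID_WHEEL,
 *		    0600, "foo%d", unit);
 *	}
 *
 *	EVENTHANDLER_REGISTER(dev_clone, foo_clone, 0, 1000);
 */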

static int
devfs_mknod(struct vop_mknod_args *ap)
{
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
	struct devfs_dirent *dd, *de;
	struct devfs_mount *dmp;
	int error;

	/*
	 * The only type of node we should be creating here is a
	 * character device; for anything else return EOPNOTSUPP.
	 */
	if (ap->a_vap->va_type != VCHR)
		return (EOPNOTSUPP);
	dvp = ap->a_dvp;
	dmp = VFSTODEVFS(dvp->v_mount);

	cnp = ap->a_cnp;
	vpp = ap->a_vpp;
	dd = dvp->v_data;

	error = ENOENT;
	sx_xlock(&dmp->dm_lock);
	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (cnp->cn_namelen != de->de_dirent->d_namlen)
			continue;
		if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name,
		    de->de_dirent->d_namlen) != 0)
			continue;
		if (de->de_flags & DE_WHITEOUT)
			break;
		goto notfound;
	}
	if (de == NULL)
		goto notfound;
	de->de_flags &= ~DE_WHITEOUT;
	error = devfs_allocv(de, dvp->v_mount, vpp);
	return (error);
notfound:
	sx_xunlock(&dmp->dm_lock);
	return (error);
}

/* ARGSUSED */
static int
devfs_open(struct vop_open_args *ap)
{
	struct thread *td = ap->a_td;
	struct vnode *vp = ap->a_vp;
	struct cdev *dev = vp->v_rdev;
	struct file *fp = ap->a_fp;
	int error;
	struct cdevsw *dsw;
	struct file *fpop;

	if (vp->v_type == VBLK)
		return (ENXIO);

	if (dev == NULL)
		return (ENXIO);

	/* Make this field valid before any I/O in d_open. */
	if (dev->si_iosize_max == 0)
		dev->si_iosize_max = DFLTPHYS;

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);

	/* XXX: Special casing of ttys for deadfs.  Probably redundant. */
	if (dsw->d_flags & D_TTY)
		vp->v_vflag |= VV_ISTTY;

	VOP_UNLOCK(vp, 0);

	fpop = td->td_fpop;
	td->td_fpop = fp;
	if (fp != NULL) {
		fp->f_data = dev;
		fp->f_vnode = vp;
	}
	if (dsw->d_fdopen != NULL)
		error = dsw->d_fdopen(dev, ap->a_mode, td, fp);
	else
		error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
	td->td_fpop = fpop;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	dev_relthread(dev);

	if (error)
		return (error);

#if 0	/* /dev/console */
	KASSERT(fp != NULL,
	    ("Could not vnode bypass device on NULL fp"));
#else
	if (fp == NULL)
		return (error);
#endif
	if (fp->f_ops == &badfileops)
		finit(fp, fp->f_flag, DTYPE_VNODE, dev, &devfs_ops_f);
	return (error);
}

static int
devfs_pathconf(struct vop_pathconf_args *ap)
{

	switch (ap->a_name) {
	case _PC_MAC_PRESENT:
#ifdef MAC
		/*
		 * If MAC is enabled, devfs automatically supports
		 * trivial non-persistent label storage.
		 */
		*ap->a_retval = 1;
#else
		*ap->a_retval = 0;
#endif
		return (0);
	default:
		return (vop_stdpathconf(ap));
	}
	/* NOTREACHED */
}

/* ARGSUSED */
static int
devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	int error;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (poll_no_poll(events));
	error = dsw->d_poll(dev, events, td);
	td->td_fpop = fpop;
	dev_relthread(dev);
	return (error);
}
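
/*
 * Illustrative sketch (not part of this file): the _PC_MAC_PRESENT case
 * in devfs_pathconf() above is reached from userland via fpathconf(2),
 * e.g. "fpathconf(fd, _PC_MAC_PRESENT) > 0" tells an application whether
 * MAC labels are available on the descriptor.
 */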

/*
 * Print out the contents of a special device vnode.
 */
static int
devfs_print(struct vop_print_args *ap)
{

	printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev));
	return (0);
}

/* ARGSUSED */
static int
devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
{
	struct cdev *dev;
	int ioflag, error, resid;
	struct cdevsw *dsw;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	resid = uio->uio_resid;
	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT);
	if (ioflag & O_DIRECT)
		ioflag |= IO_DIRECT;

	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	error = dsw->d_read(dev, uio, ioflag);
	if (uio->uio_resid != resid || (error == 0 && resid != 0))
		vfs_timestamp(&dev->si_atime);
	td->td_fpop = fpop;
	dev_relthread(dev);

	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	return (error);
}

static int
devfs_readdir(struct vop_readdir_args *ap)
{
	int error;
	struct uio *uio;
	struct dirent *dp;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp;
	off_t off;
	int *tmp_ncookies = NULL;

	if (ap->a_vp->v_type != VDIR)
		return (ENOTDIR);

	uio = ap->a_uio;
	if (uio->uio_offset < 0)
		return (EINVAL);

	/*
	 * XXX: This is a temporary hack to get around this filesystem not
	 * supporting cookies.  We store the location of the ncookies pointer
	 * in a temporary variable before calling vfs_subr.c:vfs_read_dirent()
	 * and set the number of cookies to 0.  We then set the pointer to
	 * NULL so that vfs_read_dirent doesn't try to call realloc() on
	 * ap->a_cookies.  Later in this function, we restore the ap->a_ncookies
	 * pointer to its original location before returning to the caller.
	 */
	if (ap->a_ncookies != NULL) {
		tmp_ncookies = ap->a_ncookies;
		*ap->a_ncookies = 0;
		ap->a_ncookies = NULL;
	}

	dmp = VFSTODEVFS(ap->a_vp->v_mount);
	sx_xlock(&dmp->dm_lock);
	DEVFS_DMP_HOLD(dmp);
	devfs_populate(dmp);
	if (DEVFS_DMP_DROP(dmp)) {
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
		if (tmp_ncookies != NULL)
			ap->a_ncookies = tmp_ncookies;
		return (EIO);
	}
	error = 0;
	de = ap->a_vp->v_data;
	off = 0;
	TAILQ_FOREACH(dd, &de->de_dlist, de_list) {
		KASSERT(dd->de_cdp != (void *)0xdeadc0de, ("%s %d\n", __func__, __LINE__));
		if (dd->de_flags & DE_WHITEOUT)
			continue;
		if (dd->de_dirent->d_type == DT_DIR)
			de = dd->de_dir;
		else
			de = dd;
		dp = dd->de_dirent;
		if (dp->d_reclen > uio->uio_resid)
			break;
		dp->d_fileno = de->de_inode;
		if (off >= uio->uio_offset) {
			error = vfs_read_dirent(ap, dp, off);
			if (error)
				break;
		}
		off += dp->d_reclen;
	}
	sx_xunlock(&dmp->dm_lock);
	uio->uio_offset = off;

	/*
	 * Restore ap->a_ncookies if it wasn't originally NULL in the first
	 * place.
	 */
	if (tmp_ncookies != NULL)
		ap->a_ncookies = tmp_ncookies;

	return (error);
}

static int
devfs_readlink(struct vop_readlink_args *ap)
{
	struct devfs_dirent *de;

	de = ap->a_vp->v_data;
	return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio));
}

static int
devfs_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *de;
	struct cdev *dev;

	mtx_lock(&devfs_de_interlock);
	de = vp->v_data;
	if (de != NULL) {
		de->de_vnode = NULL;
		vp->v_data = NULL;
	}
	mtx_unlock(&devfs_de_interlock);

	vnode_destroy_vobject(vp);

	VI_LOCK(vp);
	dev_lock();
	dev = vp->v_rdev;
	vp->v_rdev = NULL;

	if (dev == NULL) {
		dev_unlock();
		VI_UNLOCK(vp);
		return (0);
	}

	dev->si_usecount -= vp->v_usecount;
	dev_unlock();
	VI_UNLOCK(vp);
	dev_rel(dev);
	return (0);
}

static int
devfs_remove(struct vop_remove_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount);

	sx_xlock(&dmp->dm_lock);
	dd = ap->a_dvp->v_data;
	de = vp->v_data;
	if (de->de_cdp == NULL) {
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		devfs_delete(dmp, de, 1);
	} else {
		de->de_flags |= DE_WHITEOUT;
	}
	sx_xunlock(&dmp->dm_lock);
	return (0);
}

/*
 * Revoke is called on a tty when a terminal session ends.  The vnode
 * is orphaned by setting v_op to deadfs, so we need to let go of it
 * as well so that a new one is created the next time around.
 */
static int
devfs_revoke(struct vop_revoke_args *ap)
{
	struct vnode *vp = ap->a_vp, *vp2;
	struct cdev *dev;
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	int i;

	KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));

	dev = vp->v_rdev;
	cdp = cdev2priv(dev);

	dev_lock();
	cdp->cdp_inuse++;
	dev_unlock();

	vhold(vp);
	vgone(vp);
	vdrop(vp);

	VOP_UNLOCK(vp, 0);
loop:
	for (;;) {
		mtx_lock(&devfs_de_interlock);
		dev_lock();
		vp2 = NULL;
		for (i = 0; i <= cdp->cdp_maxdirent; i++) {
			de = cdp->cdp_dirents[i];
			if (de == NULL)
				continue;

			vp2 = de->de_vnode;
			if (vp2 != NULL) {
				dev_unlock();
				VI_LOCK(vp2);
				mtx_unlock(&devfs_de_interlock);
				if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
				    curthread))
					goto loop;
				vhold(vp2);
				vgone(vp2);
				vdrop(vp2);
				vput(vp2);
				break;
			}
		}
		if (vp2 != NULL) {
			continue;
		}
		dev_unlock();
		mtx_unlock(&devfs_de_interlock);
		break;
	}
	dev_lock();
	cdp->cdp_inuse--;
	if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) {
		TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
		dev_unlock();
		dev_rel(&cdp->cdp_c);
	} else
		dev_unlock();

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	return (0);
}

static int
devfs_rioctl(struct vop_ioctl_args *ap)
{
	int error;
	struct devfs_mount *dmp;

	dmp = VFSTODEVFS(ap->a_vp->v_mount);
	sx_xlock(&dmp->dm_lock);
	DEVFS_DMP_HOLD(dmp);
	devfs_populate(dmp);
	if (DEVFS_DMP_DROP(dmp)) {
		sx_xunlock(&dmp->dm_lock);
		devfs_unmount_final(dmp);
		return (ENOENT);
	}
	error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td);
	sx_xunlock(&dmp->dm_lock);
	return (error);
}

static int
devfs_rread(struct vop_read_args *ap)
{

	if (ap->a_vp->v_type != VDIR)
		return (EINVAL);
	return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL));
}

static int
devfs_setattr(struct vop_setattr_args *ap)
{
	struct devfs_dirent *de;
	struct vattr *vap;
	struct vnode *vp;
	struct thread *td;
	int c, error;
	uid_t uid;
	gid_t gid;

	vap = ap->a_vap;
	vp = ap->a_vp;
	td = curthread;
	if ((vap->va_type != VNON) ||
	    (vap->va_nlink != VNOVAL) ||
	    (vap->va_fsid != VNOVAL) ||
	    (vap->va_fileid != VNOVAL) ||
	    (vap->va_blocksize != VNOVAL) ||
	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
	    (vap->va_rdev != VNOVAL) ||
	    ((int)vap->va_bytes != VNOVAL) ||
	    (vap->va_gen != VNOVAL)) {
		return (EINVAL);
	}

	de = vp->v_data;
	if (vp->v_type == VDIR)
		de = de->de_dir;

	error = c = 0;
	if (vap->va_uid == (uid_t)VNOVAL)
		uid = de->de_uid;
	else
		uid = vap->va_uid;
	if (vap->va_gid == (gid_t)VNOVAL)
		gid = de->de_gid;
	else
		gid = vap->va_gid;
	if (uid != de->de_uid || gid != de->de_gid) {
		if ((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid ||
		    (gid != de->de_gid && !groupmember(gid, ap->a_cred))) {
			error = priv_check(td, PRIV_VFS_CHOWN);
			if (error)
				return (error);
		}
		de->de_uid = uid;
		de->de_gid = gid;
		c = 1;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		if (ap->a_cred->cr_uid != de->de_uid) {
			error = priv_check(td, PRIV_VFS_ADMIN);
			if (error)
				return (error);
		}
		de->de_mode = vap->va_mode;
		c = 1;
	}

	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
		/* See the comment in ufs_vnops::ufs_setattr(). */
		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)) &&
		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
		    (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, td))))
			return (error);
		if (vap->va_atime.tv_sec != VNOVAL) {
			if (vp->v_type == VCHR)
				vp->v_rdev->si_atime = vap->va_atime;
			else
				de->de_atime = vap->va_atime;
		}
		if (vap->va_mtime.tv_sec != VNOVAL) {
			if (vp->v_type == VCHR)
				vp->v_rdev->si_mtime = vap->va_mtime;
			else
				de->de_mtime = vap->va_mtime;
		}
		c = 1;
	}

	if (c) {
		if (vp->v_type == VCHR)
			vfs_timestamp(&vp->v_rdev->si_ctime);
		else
			vfs_timestamp(&de->de_mtime);
	}
	return (0);
}

#ifdef MAC
static int
devfs_setlabel(struct vop_setlabel_args *ap)
{
	struct vnode *vp;
	struct devfs_dirent *de;

	vp = ap->a_vp;
	de = vp->v_data;

	mac_vnode_relabel(ap->a_cred, vp, ap->a_label);
	mac_devfs_update(vp->v_mount, de, vp);

	return (0);
}
#endif

static int
devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred, struct thread *td)
{

	return (vnops.fo_stat(fp, sb, cred, td));
}

static int
devfs_symlink(struct vop_symlink_args *ap)
{
	int i, error;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp;

	error = priv_check(curthread, PRIV_DEVFS_SYMLINK);
	if (error)
		return (error);
	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
	dd = ap->a_dvp->v_data;
	de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen);
	de->de_uid = 0;
	de->de_gid = 0;
	de->de_mode = 0755;
	de->de_inode = alloc_unr(devfs_inos);
	de->de_dirent->d_type = DT_LNK;
	i = strlen(ap->a_target) + 1;
	de->de_symlink = malloc(i, M_DEVFS, M_WAITOK);
	bcopy(ap->a_target, de->de_symlink, i);
	sx_xlock(&dmp->dm_lock);
#ifdef MAC
	mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de);
#endif
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	return (devfs_allocv(de, ap->a_dvp->v_mount, ap->a_vpp));
}

static int
devfs_truncate_f(struct file *fp, off_t length, struct ucred *cred, struct thread *td)
{

	return (vnops.fo_truncate(fp, length, cred, td));
}

/* ARGSUSED */
static int
devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
{
	struct cdev *dev;
	int error, ioflag, resid;
	struct cdevsw *dsw;
	struct file *fpop;

	fpop = td->td_fpop;
	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC);
	if (ioflag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	resid = uio->uio_resid;

	error = dsw->d_write(dev, uio, ioflag);
	if (uio->uio_resid != resid || (error == 0 && resid != 0)) {
		vfs_timestamp(&dev->si_ctime);
		dev->si_mtime = dev->si_ctime;
	}
	td->td_fpop = fpop;
	dev_relthread(dev);

	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	return (error);
}

dev_t
dev2udev(struct cdev *x)
{
	if (x == NULL)
		return (NODEV);
	return (cdev2priv(x)->cdp_inode);
}

static struct fileops devfs_ops_f = {
	.fo_read =	devfs_read_f,
	.fo_write =	devfs_write_f,
	.fo_truncate =	devfs_truncate_f,
	.fo_ioctl =	devfs_ioctl_f,
	.fo_poll =	devfs_poll_f,
	.fo_kqfilter =	devfs_kqfilter_f,
	.fo_stat =	devfs_stat_f,
	.fo_close =	devfs_close_f,
	.fo_flags =	DFLAG_PASSABLE | DFLAG_SEEKABLE
};

static struct vop_vector devfs_vnodeops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		devfs_access,
	.vop_getattr =		devfs_getattr,
	.vop_ioctl =		devfs_rioctl,
	.vop_lookup =		devfs_lookup,
	.vop_mknod =		devfs_mknod,
	.vop_pathconf =		devfs_pathconf,
	.vop_read =		devfs_rread,
	.vop_readdir =		devfs_readdir,
	.vop_readlink =		devfs_readlink,
	.vop_reclaim =		devfs_reclaim,
	.vop_remove =		devfs_remove,
	.vop_revoke =		devfs_revoke,
	.vop_setattr =		devfs_setattr,
#ifdef MAC
	.vop_setlabel =		devfs_setlabel,
#endif
	.vop_symlink =		devfs_symlink,
	.vop_vptocnp =		devfs_vptocnp,
};

static struct vop_vector devfs_specops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		devfs_access,
	.vop_bmap =		VOP_PANIC,
	.vop_close =		devfs_close,
	.vop_create =		VOP_PANIC,
	.vop_fsync =		devfs_fsync,
	.vop_getattr =		devfs_getattr,
	.vop_link =		VOP_PANIC,
	.vop_mkdir =		VOP_PANIC,
	.vop_mknod =		VOP_PANIC,
	.vop_open =		devfs_open,
	.vop_pathconf =		devfs_pathconf,
	.vop_print =		devfs_print,
	.vop_read =		VOP_PANIC,
	.vop_readdir =		VOP_PANIC,
	.vop_readlink =		VOP_PANIC,
	.vop_reallocblks =	VOP_PANIC,
	.vop_reclaim =		devfs_reclaim,
	.vop_remove =		devfs_remove,
	.vop_rename =		VOP_PANIC,
	.vop_revoke =		devfs_revoke,
	.vop_rmdir =		VOP_PANIC,
	.vop_setattr =		devfs_setattr,
#ifdef MAC
	.vop_setlabel =		devfs_setlabel,
#endif
	.vop_strategy =		VOP_PANIC,
	.vop_symlink =		VOP_PANIC,
	.vop_vptocnp =		devfs_vptocnp,
	.vop_write =		VOP_PANIC,
};

/*
 * Our calling convention to the device drivers used to be that we passed
 * vnode.h IO_* flags to read()/write(), but we're moving to fcntl.h O_*
 * flags instead, since that's what open(), close() and ioctl() take and
 * we don't really want vnode.h in device drivers.
 * We preserved source compatibility by redefining some vnode flags to
 * be the same as the fcntl ones and by sending down the bitwise OR of
 * the respective fcntl/vnode flags.  These CTASSERTs make sure nobody
 * pulls the rug out from under this.
 */
CTASSERT(O_NONBLOCK == IO_NDELAY);
CTASSERT(O_FSYNC == IO_SYNC);