/*-
 * Copyright (c) 2000-2004
 *	Poul-Henning Kamp.  All rights reserved.
 * Copyright (c) 1989, 1992-1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kernfs_vnops.c	8.15 (Berkeley) 5/21/95
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43
 *
 * $FreeBSD$
 */

/*
 * TODO:
 *	remove empty directories
 *	mknod: hunt down DE_DELETED, compare name, reinstantiate.
 *	mkdir: want it ?
 */

#include <opt_devfs.h>
#include <opt_mac.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/ttycom.h>
#include <sys/unistd.h>
#include <sys/vnode.h>

#include <fs/devfs/devfs.h>

static fo_rdwr_t	devfs_read_f;
static fo_rdwr_t	devfs_write_f;
static fo_ioctl_t	devfs_ioctl_f;
static fo_poll_t	devfs_poll_f;
static fo_kqfilter_t	devfs_kqfilter_f;
static fo_stat_t	devfs_stat_f;
static fo_close_t	devfs_close_f;

static struct fileops devfs_ops_f = {
	.fo_read =	devfs_read_f,
	.fo_write =	devfs_write_f,
	.fo_ioctl =	devfs_ioctl_f,
	.fo_poll =	devfs_poll_f,
	.fo_kqfilter =	devfs_kqfilter_f,
	.fo_stat =	devfs_stat_f,
	.fo_close =	devfs_close_f,
	.fo_flags =	DFLAG_PASSABLE | DFLAG_SEEKABLE
};

static vop_access_t	devfs_access;
static vop_advlock_t	devfs_advlock;
static vop_close_t	devfs_close;
static vop_fsync_t	devfs_fsync;
static vop_getattr_t	devfs_getattr;
static vop_lookup_t	devfs_lookup;
static vop_lookup_t	devfs_lookupx;
static vop_mknod_t	devfs_mknod;
static vop_open_t	devfs_open;
static vop_pathconf_t	devfs_pathconf;
static vop_print_t	devfs_print;
static vop_readdir_t	devfs_readdir;
static vop_readlink_t	devfs_readlink;
static vop_reclaim_t	devfs_reclaim;
static vop_remove_t	devfs_remove;
static vop_revoke_t	devfs_revoke;
static vop_ioctl_t	devfs_rioctl;
static vop_read_t	devfs_rread;
static vop_setattr_t	devfs_setattr;
#ifdef MAC
static vop_setlabel_t	devfs_setlabel;
#endif
static vop_symlink_t	devfs_symlink;

static struct vop_vector devfs_vnodeops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		devfs_access,
	.vop_getattr =		devfs_getattr,
	.vop_ioctl =		devfs_rioctl,
	.vop_lookup =		devfs_lookup,
	.vop_mknod =		devfs_mknod,
	.vop_pathconf =		devfs_pathconf,
	.vop_read =		devfs_rread,
	.vop_readdir =		devfs_readdir,
	.vop_readlink =		devfs_readlink,
	.vop_reclaim =		devfs_reclaim,
	.vop_remove =		devfs_remove,
	.vop_revoke =		devfs_revoke,
	.vop_setattr =		devfs_setattr,
#ifdef MAC
	.vop_setlabel =		devfs_setlabel,
#endif
	.vop_symlink =		devfs_symlink,
};

static struct vop_vector devfs_specops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		devfs_access,
	.vop_advlock =		devfs_advlock,
	.vop_bmap =		VOP_PANIC,
	.vop_close =		devfs_close,
	.vop_create =		VOP_PANIC,
	.vop_fsync =		devfs_fsync,
	.vop_getattr =		devfs_getattr,
	.vop_lease =		VOP_NULL,
	.vop_link =		VOP_PANIC,
	.vop_mkdir =		VOP_PANIC,
	.vop_mknod =		VOP_PANIC,
	.vop_open =		devfs_open,
	.vop_pathconf =		devfs_pathconf,
	.vop_print =		devfs_print,
	.vop_read =		VOP_PANIC,
	.vop_readdir =		VOP_PANIC,
	.vop_readlink =		VOP_PANIC,
	.vop_reallocblks =	VOP_PANIC,
	.vop_reclaim =		devfs_reclaim,
	.vop_remove =		devfs_remove,
	.vop_rename =		VOP_PANIC,
	.vop_revoke =		devfs_revoke,
	.vop_rmdir =		VOP_PANIC,
	.vop_setattr =		devfs_setattr,
#ifdef MAC
	.vop_setlabel =		devfs_setlabel,
#endif
	.vop_strategy =		VOP_PANIC,
	.vop_symlink =		VOP_PANIC,
	.vop_write =		VOP_PANIC,
};

static u_int
devfs_random(void)
{
	static u_int devfs_seed;

	while (devfs_seed == 0) {
		/*
		 * Make sure people don't make stupid assumptions
		 * about device major/minor numbers in userspace.
		 * We do this late to get entropy and for the same
		 * reason we force a reseed, but it may not be
		 * late enough for entropy to be available.
		 */
		arc4rand(&devfs_seed, sizeof devfs_seed, 1);
		devfs_seed &= 0xf0f;
	}
	return (devfs_seed);
}

static int
devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp)
{

	*devp = fp->f_vnode->v_rdev;
	if (*devp != fp->f_data)
		return (ENXIO);
	KASSERT((*devp)->si_refcount > 0,
	    ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp)));
	*dswp = dev_refthread(*devp);
	if (*dswp == NULL)
		return (ENXIO);
	return (0);
}

/*
 * Construct the fully qualified path name relative to the mountpoint
 */
static char *
devfs_fqpn(char *buf, struct vnode *dvp, struct componentname *cnp)
{
	int i;
	struct devfs_dirent *de, *dd;
	struct devfs_mount *dmp;

	dmp = VFSTODEVFS(dvp->v_mount);
	dd = dvp->v_data;
	i = SPECNAMELEN;
	buf[i] = '\0';
	i -= cnp->cn_namelen;
	if (i < 0)
		return (NULL);
	bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen);
	de = dd;
	while (de != dmp->dm_basedir) {
		i--;
		if (i < 0)
			return (NULL);
		buf[i] = '/';
		i -= de->de_dirent->d_namlen;
		if (i < 0)
			return (NULL);
		bcopy(de->de_dirent->d_name, buf + i,
		    de->de_dirent->d_namlen);
		de = TAILQ_FIRST(&de->de_dlist);	/* "." */
		de = TAILQ_NEXT(de, de_list);		/* ".." */
		de = de->de_dir;
	}
	return (buf + i);
}

int
devfs_allocv(struct devfs_dirent *de, struct mount *mp, struct vnode **vpp, struct thread *td)
{
	int error;
	struct vnode *vp;
	struct cdev *dev;

	KASSERT(td == curthread, ("devfs_allocv: td != curthread"));
loop:
	vp = de->de_vnode;
	if (vp != NULL) {
		if (vget(vp, LK_EXCLUSIVE, td))
			goto loop;
		*vpp = vp;
		return (0);
	}
	if (de->de_dirent->d_type == DT_CHR) {
		dev = *devfs_itod(de->de_inode);
		if (dev == NULL)
			return (ENOENT);
	} else {
		dev = NULL;
	}
	error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp);
	if (error != 0) {
		printf("devfs_allocv: failed to allocate new vnode\n");
		return (error);
	}

	if (de->de_dirent->d_type == DT_CHR) {
		vp->v_type = VCHR;
		VI_LOCK(vp);
		dev_lock();
		dev_refl(dev);
		vp->v_rdev = dev;
		LIST_INSERT_HEAD(&dev->si_alist, de, de_alias);
		dev->si_usecount += vp->v_usecount;
		dev_unlock();
		VI_UNLOCK(vp);
		vp->v_op = &devfs_specops;
	} else if (de->de_dirent->d_type == DT_DIR) {
		vp->v_type = VDIR;
	} else if (de->de_dirent->d_type == DT_LNK) {
		vp->v_type = VLNK;
	} else {
		vp->v_type = VBAD;
	}
	vp->v_data = de;
	de->de_vnode = vp;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
#ifdef MAC
	mac_associate_vnode_devfs(mp, de, vp);
#endif
	*vpp = vp;
	return (0);
}

static int
devfs_access(ap)
	struct vop_access_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *de;
	int error;

	de = vp->v_data;
	if (vp->v_type == VDIR)
		de = de->de_dir;

	error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid,
	    ap->a_mode, ap->a_cred, NULL);
	if (!error)
		return (error);
	if (error != EACCES)
		return (error);
	/* We do, however, allow access to the controlling terminal */
	if (!(ap->a_td->td_proc->p_flag & P_CONTROLT))
		return (error);
	if (ap->a_td->td_proc->p_session->s_ttyvp == de->de_vnode)
		return (0);
	return (error);
}

/*
 * Special device advisory byte-level locks.
 */
/* ARGSUSED */
static int
devfs_advlock(ap)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
{

	return (ap->a_flags & F_FLOCK ? EOPNOTSUPP : EINVAL);
}

/*
 * Device close routine
 */
/* ARGSUSED */
static int
devfs_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp, *oldvp;
	struct thread *td = ap->a_td;
	struct cdev *dev = vp->v_rdev;
	struct cdevsw *dsw;
	int error;

	/*
	 * Hack: a tty device that is a controlling terminal
	 * has a reference from the session structure.
	 * We cannot easily tell that a character device is
	 * a controlling terminal, unless it is the closing
	 * process' controlling terminal.  In that case,
	 * if the reference count is 2 (this last descriptor
	 * plus the session), release the reference from the session.
	 */

	/*
	 * This needs to be rewritten to take the vp interlock into
	 * consideration.
380 */ 381 382 oldvp = NULL; 383 sx_xlock(&proctree_lock); 384 if (td && vp == td->td_proc->p_session->s_ttyvp) { 385 SESS_LOCK(td->td_proc->p_session); 386 VI_LOCK(vp); 387 if (count_dev(dev) == 2 && (vp->v_iflag & VI_DOOMED) == 0) { 388 td->td_proc->p_session->s_ttyvp = NULL; 389 oldvp = vp; 390 } 391 VI_UNLOCK(vp); 392 SESS_UNLOCK(td->td_proc->p_session); 393 } 394 sx_xunlock(&proctree_lock); 395 if (oldvp != NULL) 396 vrele(oldvp); 397 /* 398 * We do not want to really close the device if it 399 * is still in use unless we are trying to close it 400 * forcibly. Since every use (buffer, vnode, swap, cmap) 401 * holds a reference to the vnode, and because we mark 402 * any other vnodes that alias this device, when the 403 * sum of the reference counts on all the aliased 404 * vnodes descends to one, we are on last close. 405 */ 406 dsw = dev_refthread(dev); 407 if (dsw == NULL) 408 return (ENXIO); 409 VI_LOCK(vp); 410 if (vp->v_iflag & VI_DOOMED) { 411 /* Forced close. */ 412 } else if (dsw->d_flags & D_TRACKCLOSE) { 413 /* Keep device updated on status. */ 414 } else if (count_dev(dev) > 1) { 415 VI_UNLOCK(vp); 416 dev_relthread(dev); 417 return (0); 418 } 419 VI_UNLOCK(vp); 420 KASSERT(dev->si_refcount > 0, 421 ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev))); 422 if (!(dsw->d_flags & D_NEEDGIANT)) { 423 DROP_GIANT(); 424 error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td); 425 PICKUP_GIANT(); 426 } else { 427 mtx_lock(&Giant); 428 error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td); 429 mtx_unlock(&Giant); 430 } 431 dev_relthread(dev); 432 return (error); 433 } 434 435 static int 436 devfs_close_f(struct file *fp, struct thread *td) 437 { 438 439 return (vnops.fo_close(fp, td)); 440 } 441 442 /* 443 * Synch buffers associated with a block device 444 */ 445 /* ARGSUSED */ 446 static int 447 devfs_fsync(ap) 448 struct vop_fsync_args /* { 449 struct vnode *a_vp; 450 struct ucred *a_cred; 451 int a_waitfor; 452 struct thread *a_td; 453 } */ *ap; 454 { 455 if (!vn_isdisk(ap->a_vp, NULL)) 456 return (0); 457 458 return (vop_stdfsync(ap)); 459 } 460 461 static int 462 devfs_getattr(ap) 463 struct vop_getattr_args /* { 464 struct vnode *a_vp; 465 struct vattr *a_vap; 466 struct ucred *a_cred; 467 struct thread *a_td; 468 } */ *ap; 469 { 470 struct vnode *vp = ap->a_vp; 471 struct vattr *vap = ap->a_vap; 472 int error = 0; 473 struct devfs_dirent *de; 474 struct cdev *dev; 475 476 de = vp->v_data; 477 KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp)); 478 if (vp->v_type == VDIR) { 479 de = de->de_dir; 480 KASSERT(de != NULL, 481 ("Null dir dirent in devfs_getattr vp=%p", vp)); 482 } 483 bzero((caddr_t) vap, sizeof(*vap)); 484 vattr_null(vap); 485 vap->va_uid = de->de_uid; 486 vap->va_gid = de->de_gid; 487 vap->va_mode = de->de_mode; 488 if (vp->v_type == VLNK) 489 vap->va_size = strlen(de->de_symlink); 490 else if (vp->v_type == VDIR) 491 vap->va_size = vap->va_bytes = DEV_BSIZE; 492 else 493 vap->va_size = 0; 494 if (vp->v_type != VDIR) 495 vap->va_bytes = 0; 496 vap->va_blocksize = DEV_BSIZE; 497 vap->va_type = vp->v_type; 498 499 #define fix(aa) \ 500 do { \ 501 if ((aa).tv_sec == 0) { \ 502 (aa).tv_sec = boottime.tv_sec; \ 503 (aa).tv_nsec = boottime.tv_usec * 1000; \ 504 } \ 505 } while (0) 506 507 if (vp->v_type != VCHR) { 508 fix(de->de_atime); 509 vap->va_atime = de->de_atime; 510 fix(de->de_mtime); 511 vap->va_mtime = de->de_mtime; 512 fix(de->de_ctime); 513 vap->va_ctime = de->de_ctime; 514 } else { 515 dev = vp->v_rdev; 516 fix(dev->si_atime); 517 
		vap->va_atime = dev->si_atime;
		fix(dev->si_mtime);
		vap->va_mtime = dev->si_mtime;
		fix(dev->si_ctime);
		vap->va_ctime = dev->si_ctime;

		vap->va_rdev = dev->si_inode ^ devfs_random();
	}
	vap->va_gen = 0;
	vap->va_flags = 0;
	vap->va_nlink = de->de_links;
	vap->va_fileid = de->de_inode;

	return (error);
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
static int
devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struct thread *td)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	struct vnode *vp;
	struct vnode *vpold;
	int error, i;
	const char *p;
	struct fiodgname_arg *fgn;

	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);

	if (com == FIODTYPE) {
		*(int *)data = dsw->d_flags & D_TYPEMASK;
		dev_relthread(dev);
		return (0);
	} else if (com == FIODGNAME) {
		fgn = data;
		p = devtoname(dev);
		i = strlen(p) + 1;
		if (i > fgn->len)
			error = EINVAL;
		else
			error = copyout(p, fgn->buf, i);
		/* Drop the dev_refthread() reference before returning. */
		dev_relthread(dev);
		return (error);
	}
	if (dsw->d_flags & D_NEEDGIANT)
		mtx_lock(&Giant);
	error = dsw->d_ioctl(dev, com, data, fp->f_flag, td);
	if (dsw->d_flags & D_NEEDGIANT)
		mtx_unlock(&Giant);
	dev_relthread(dev);
	if (error == ENOIOCTL)
		error = ENOTTY;
	if (error == 0 && com == TIOCSCTTY) {
		vp = fp->f_vnode;

		/* Do nothing if reassigning same control tty */
		sx_slock(&proctree_lock);
		if (td->td_proc->p_session->s_ttyvp == vp) {
			sx_sunlock(&proctree_lock);
			return (0);
		}

		mtx_lock(&Giant);

		vpold = td->td_proc->p_session->s_ttyvp;
		VREF(vp);
		SESS_LOCK(td->td_proc->p_session);
		td->td_proc->p_session->s_ttyvp = vp;
		SESS_UNLOCK(td->td_proc->p_session);

		sx_sunlock(&proctree_lock);

		/* Get rid of reference to old control tty */
		if (vpold)
			vrele(vpold);
		mtx_unlock(&Giant);
	}
	return (error);
}


/* ARGSUSED */
static int
devfs_kqfilter_f(struct file *fp, struct knote *kn)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	int error;

	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	if (dsw->d_flags & D_NEEDGIANT)
		mtx_lock(&Giant);
	error = dsw->d_kqfilter(dev, kn);
	if (dsw->d_flags & D_NEEDGIANT)
		mtx_unlock(&Giant);
	dev_relthread(dev);
	return (error);
}

static int
devfs_lookupx(ap)
	struct vop_lookup_args /* {
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap;
{
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
	struct thread *td;
	struct devfs_dirent *de, *dd;
	struct devfs_dirent **dde;
	struct devfs_mount *dmp;
	struct cdev *cdev;
	int error, flags, nameiop;
	char specname[SPECNAMELEN + 1], *pname;

	cnp = ap->a_cnp;
	vpp = ap->a_vpp;
	dvp = ap->a_dvp;
	pname = cnp->cn_nameptr;
	td = cnp->cn_thread;
	flags = cnp->cn_flags;
	nameiop = cnp->cn_nameiop;
	dmp = VFSTODEVFS(dvp->v_mount);
	dd = dvp->v_data;
	*vpp = NULLVP;

	if ((flags & ISLASTCN) && nameiop == RENAME)
		return (EOPNOTSUPP);

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
		return (EIO);

	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
	if (error)
		return (error);

	if (cnp->cn_namelen == 1 && *pname == '.') {
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		*vpp = dvp;
		VREF(dvp);
		return (0);
	}

	if (flags & ISDOTDOT) {
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		VOP_UNLOCK(dvp, 0, td);
		de = TAILQ_FIRST(&dd->de_dlist);	/* "." */
		de = TAILQ_NEXT(de, de_list);		/* ".." */
		de = de->de_dir;
		error = devfs_allocv(de, dvp->v_mount, vpp, td);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
		return (error);
	}

	devfs_populate(dmp);
	dd = dvp->v_data;
	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (cnp->cn_namelen != de->de_dirent->d_namlen)
			continue;
		if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name,
		    de->de_dirent->d_namlen) != 0)
			continue;
		if (de->de_flags & DE_WHITEOUT)
			goto notfound;
		goto found;
	}

	if (nameiop == DELETE)
		goto notfound;

	/*
	 * OK, we didn't have an entry for the name we were asked for
	 * so we try to see if anybody can create it on demand.
	 */
	pname = devfs_fqpn(specname, dvp, cnp);
	if (pname == NULL)
		goto notfound;

	cdev = NULL;
	EVENTHANDLER_INVOKE(dev_clone, td->td_ucred, pname, strlen(pname),
	    &cdev);
	if (cdev == NULL)
		goto notfound;

	devfs_populate(dmp);

	dde = devfs_itode(dmp, cdev->si_inode);
	dev_rel(cdev);

	if (dde == NULL || *dde == NULL || *dde == DE_DELETED)
		goto notfound;

	if ((*dde)->de_flags & DE_WHITEOUT)
		goto notfound;

	de = *dde;
	goto found;

notfound:

	if ((nameiop == CREATE || nameiop == RENAME) &&
	    (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) {
		cnp->cn_flags |= SAVENAME;
		return (EJUSTRETURN);
	}
	return (ENOENT);


found:

	if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) {
		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
		if (error)
			return (error);
		if (*vpp == dvp) {
			VREF(dvp);
			*vpp = dvp;
			return (0);
		}
		error = devfs_allocv(de, dvp->v_mount, vpp, td);
		if (error)
			return (error);
		return (0);
	}
	error = devfs_allocv(de, dvp->v_mount, vpp, td);
	return (error);
}

static int
devfs_lookup(struct vop_lookup_args *ap)
{
	int j;
	struct devfs_mount *dmp;

	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
	lockmgr(&dmp->dm_lock, LK_SHARED, 0, curthread);
	j = devfs_lookupx(ap);
	lockmgr(&dmp->dm_lock, LK_RELEASE, 0, curthread);
	return (j);
}

static int
devfs_mknod(struct vop_mknod_args *ap)
/*
	struct vop_mknod_args {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */
{
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
	struct thread *td;
	struct devfs_dirent *dd, *de;
	struct devfs_mount *dmp;
	int error;

	/*
	 * The only type of node we should be creating here is a
	 * character device, for anything else return EOPNOTSUPP.
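	 * Even then nothing new is really created: the loop below only
	 * looks for an existing directory entry with a matching name and
	 * the DE_WHITEOUT flag set, and clearing that flag brings the
	 * previously "removed" node back into view.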
	 */
	if (ap->a_vap->va_type != VCHR)
		return (EOPNOTSUPP);
	dvp = ap->a_dvp;
	dmp = VFSTODEVFS(dvp->v_mount);
	lockmgr(&dmp->dm_lock, LK_EXCLUSIVE, 0, curthread);

	cnp = ap->a_cnp;
	vpp = ap->a_vpp;
	td = cnp->cn_thread;
	dd = dvp->v_data;

	error = ENOENT;
	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (cnp->cn_namelen != de->de_dirent->d_namlen)
			continue;
		if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name,
		    de->de_dirent->d_namlen) != 0)
			continue;
		if (de->de_flags & DE_WHITEOUT)
			break;
		goto notfound;
	}
	if (de == NULL)
		goto notfound;
	de->de_flags &= ~DE_WHITEOUT;
	error = devfs_allocv(de, dvp->v_mount, vpp, td);
notfound:
	lockmgr(&dmp->dm_lock, LK_RELEASE, 0, curthread);
	return (error);
}

/*
 * Open a special file.
 */
/* ARGSUSED */
static int
devfs_open(ap)
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
		int a_fdidx;
	} */ *ap;
{
	struct thread *td = ap->a_td;
	struct vnode *vp = ap->a_vp;
	struct cdev *dev = vp->v_rdev;
	struct file *fp;
	int error;
	struct cdevsw *dsw;

	if (vp->v_type == VBLK)
		return (ENXIO);

	if (dev == NULL)
		return (ENXIO);

	/* Make this field valid before any I/O in d_open. */
	if (dev->si_iosize_max == 0)
		dev->si_iosize_max = DFLTPHYS;

	if (vn_isdisk(vp, NULL) &&
	    ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disks.
		 * XXX: should be in geom_dev.c, but we lack the cred there.
		 */
		error = securelevel_ge(td->td_ucred, 2);
		if (error)
			return (error);
	}

	dsw = dev_refthread(dev);
	if (dsw == NULL)
		return (ENXIO);

	/* XXX: Special casing of ttys for deadfs.  Probably redundant. */
	if (dsw->d_flags & D_TTY)
		vp->v_vflag |= VV_ISTTY;

	VOP_UNLOCK(vp, 0, td);

	if (!(dsw->d_flags & D_NEEDGIANT)) {
		DROP_GIANT();
		if (dsw->d_fdopen != NULL)
			error = dsw->d_fdopen(dev, ap->a_mode, td, ap->a_fdidx);
		else
			error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
		PICKUP_GIANT();
	} else {
		mtx_lock(&Giant);
		if (dsw->d_fdopen != NULL)
			error = dsw->d_fdopen(dev, ap->a_mode, td, ap->a_fdidx);
		else
			error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
		mtx_unlock(&Giant);
	}

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);

	dev_relthread(dev);

	if (error)
		return (error);

#if 0	/* /dev/console */
	KASSERT(ap->a_fdidx >= 0,
	    ("Could not vnode bypass device on fd %d", ap->a_fdidx));
#else
	if (ap->a_fdidx < 0)
		return (error);
#endif
	/*
	 * This is a pretty disgustingly long chain, but I am not
	 * sure there is any better way.  Passing the fdidx into
	 * VOP_OPEN() offers us more information than just passing
	 * the file *.
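	 * Once we have the struct file we switch its f_ops to devfs_ops_f
	 * and point f_data at the cdev, so subsequent read, write, ioctl,
	 * poll, stat and kqfilter calls go straight to the driver instead
	 * of through the vnode layer.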
	 */
	fp = ap->a_td->td_proc->p_fd->fd_ofiles[ap->a_fdidx];
	KASSERT(fp->f_ops == &badfileops,
	    ("Could not vnode bypass device on fdops %p", fp->f_ops));
	fp->f_ops = &devfs_ops_f;
	fp->f_data = dev;
	return (error);
}

static int
devfs_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_NAME_MAX:
		*ap->a_retval = NAME_MAX;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_MAC_PRESENT:
#ifdef MAC
		/*
		 * If MAC is enabled, devfs automatically supports
		 * trivial non-persistent label storage.
		 */
		*ap->a_retval = 1;
#else
		*ap->a_retval = 0;
#endif
		return (0);
	default:
		return (vop_stdpathconf(ap));
	}
	/* NOTREACHED */
}

/* ARGSUSED */
static int
devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
{
	struct cdev *dev;
	struct cdevsw *dsw;
	int error;

	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	if (dsw->d_flags & D_NEEDGIANT)
		mtx_lock(&Giant);
	error = dsw->d_poll(dev, events, td);
	if (dsw->d_flags & D_NEEDGIANT)
		mtx_unlock(&Giant);
	dev_relthread(dev);
	return (error);
}

/*
 * Print out the contents of a special device vnode.
 */
static int
devfs_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev));
	return (0);
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
static int
devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
{
	struct cdev *dev;
	int ioflag, error, resid;
	struct cdevsw *dsw;

	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	resid = uio->uio_resid;
	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT);
	if (ioflag & O_DIRECT)
		ioflag |= IO_DIRECT;

	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	if (dsw->d_flags & D_NEEDGIANT)
		mtx_lock(&Giant);
	error = dsw->d_read(dev, uio, ioflag);
	if (dsw->d_flags & D_NEEDGIANT)
		mtx_unlock(&Giant);
	dev_relthread(dev);
	if (uio->uio_resid != resid || (error == 0 && resid != 0))
		vfs_timestamp(&dev->si_atime);

	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	return (error);
}

static int
devfs_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		int *a_ncookies;
		u_long **a_cookies;
	} */ *ap;
{
	int error;
	struct uio *uio;
	struct dirent *dp;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp;
	off_t off, oldoff;
	int ncookies = 0;
	u_long *cookiebuf, *cookiep;
	struct dirent *dps, *dpe;

	if (ap->a_vp->v_type != VDIR)
		return (ENOTDIR);

	uio = ap->a_uio;
	if (uio->uio_offset < 0)
		return (EINVAL);

	dmp = VFSTODEVFS(ap->a_vp->v_mount);
	lockmgr(&dmp->dm_lock, LK_SHARED, 0, curthread);
	devfs_populate(dmp);
	error = 0;
	de = ap->a_vp->v_data;
	off = 0;
	oldoff = uio->uio_offset;
	TAILQ_FOREACH(dd, &de->de_dlist, de_list) {
		if (dd->de_flags & DE_WHITEOUT)
			continue;
		if (dd->de_dirent->d_type == DT_DIR)
			de = dd->de_dir;
		else
			de = dd;
		dp = dd->de_dirent;
		if (dp->d_reclen > uio->uio_resid)
			break;
		dp->d_fileno = de->de_inode;
		if (off >= uio->uio_offset) {
			ncookies++;
			error = uiomove(dp, dp->d_reclen, uio);
			if (error)
				break;
		}
		off += dp->d_reclen;
	}
	if (!error && ap->a_ncookies != NULL && ap->a_cookies != NULL) {
		MALLOC(cookiebuf, u_long *, ncookies * sizeof(u_long),
		    M_TEMP, M_WAITOK);
		cookiep = cookiebuf;
		dps = (struct dirent *)((char *)uio->uio_iov->iov_base -
		    (uio->uio_offset - oldoff));
		dpe = (struct dirent *) uio->uio_iov->iov_base;
		for (dp = dps;
		    dp < dpe;
		    dp = (struct dirent *)((caddr_t) dp + dp->d_reclen)) {
			oldoff += dp->d_reclen;
			*cookiep++ = (u_long) oldoff;
		}
		*ap->a_ncookies = ncookies;
		*ap->a_cookies = cookiebuf;
	}
	lockmgr(&dmp->dm_lock, LK_RELEASE, 0, curthread);
	uio->uio_offset = off;
	return (error);
}

static int
devfs_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct devfs_dirent *de;

	de = ap->a_vp->v_data;
	error = uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio);
	return (error);
}

static int
devfs_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *de;
	struct cdev *dev;

	de = vp->v_data;
	if (de != NULL)
		de->de_vnode = NULL;
	vp->v_data = NULL;
	vnode_destroy_vobject(vp);

	dev = vp->v_rdev;
	vp->v_rdev = NULL;

	if (dev == NULL)
		return (0);

	dev_lock();
	if (de != NULL)
		LIST_REMOVE(de, de_alias);
	dev->si_usecount -= vp->v_usecount;
	dev_unlock();
	dev_rel(dev);
	return (0);
}

static int
devfs_remove(ap)
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount);

	lockmgr(&dmp->dm_lock, LK_EXCLUSIVE, 0, curthread);
	dd = ap->a_dvp->v_data;
	de = vp->v_data;
	if (de->de_dirent->d_type == DT_LNK) {
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		if (de->de_vnode)
			de->de_vnode->v_data = NULL;
#ifdef MAC
		mac_destroy_devfsdirent(de);
#endif
		FREE(de, M_DEVFS);
	} else {
		de->de_flags |= DE_WHITEOUT;
	}
	lockmgr(&dmp->dm_lock, LK_RELEASE, 0, curthread);
	return (0);
}

/*
 * Revoke is called on a tty when a terminal session ends.  The vnode
 * is orphaned by setting v_op to deadfs so we need to let go of it
 * as well so that we create a new one next time around.
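 *
 * The loop below keeps taking the first alias off the device's si_alist
 * and vgone()'s its vnode until the list is empty; devfs_reclaim()
 * unlinks each entry as its vnode is destroyed.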
 */
static int
devfs_revoke(ap)
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct cdev *dev;
	struct devfs_dirent *de;

	KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));

	dev = vp->v_rdev;
	for (;;) {
		dev_lock();
		de = LIST_FIRST(&dev->si_alist);
		dev_unlock();
		if (de == NULL)
			break;
		vgone(de->de_vnode);
	}
	return (0);
}

static int
devfs_rioctl(ap)
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		u_long a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct devfs_mount *dmp;

	dmp = VFSTODEVFS(ap->a_vp->v_mount);
	lockmgr(&dmp->dm_lock, LK_SHARED, 0, curthread);
	devfs_populate(dmp);
	lockmgr(&dmp->dm_lock, LK_RELEASE, 0, curthread);
	error = devfs_rules_ioctl(ap->a_vp->v_mount, ap->a_command, ap->a_data,
	    ap->a_td);
	return (error);
}

static int
devfs_rread(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{

	if (ap->a_vp->v_type != VDIR)
		return (EINVAL);
	return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL));
}

static int
devfs_setattr(ap)
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct devfs_dirent *de;
	struct vattr *vap;
	struct vnode *vp;
	int c, error;
	uid_t uid;
	gid_t gid;

	vap = ap->a_vap;
	vp = ap->a_vp;
	if ((vap->va_type != VNON) ||
	    (vap->va_nlink != VNOVAL) ||
	    (vap->va_fsid != VNOVAL) ||
	    (vap->va_fileid != VNOVAL) ||
	    (vap->va_blocksize != VNOVAL) ||
	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
	    (vap->va_rdev != VNOVAL) ||
	    ((int)vap->va_bytes != VNOVAL) ||
	    (vap->va_gen != VNOVAL)) {
		return (EINVAL);
	}

	de = vp->v_data;
	if (vp->v_type == VDIR)
		de = de->de_dir;

	error = c = 0;
	if (vap->va_uid == (uid_t)VNOVAL)
		uid = de->de_uid;
	else
		uid = vap->va_uid;
	if (vap->va_gid == (gid_t)VNOVAL)
		gid = de->de_gid;
	else
		gid = vap->va_gid;
	if (uid != de->de_uid || gid != de->de_gid) {
		if (((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid ||
		    (gid != de->de_gid && !groupmember(gid, ap->a_cred))) &&
		    (error = suser_cred(ap->a_td->td_ucred, SUSER_ALLOWJAIL)) != 0)
			return (error);
		de->de_uid = uid;
		de->de_gid = gid;
		c = 1;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != de->de_uid) &&
		    (error = suser_cred(ap->a_td->td_ucred, SUSER_ALLOWJAIL)))
			return (error);
		de->de_mode = vap->va_mode;
		c = 1;
	}

	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
		/* See the comment in ufs_vnops::ufs_setattr(). */
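		/*
		 * In short: setting explicit timestamps requires ownership
		 * (VADMIN), while a utimes(2) call with a NULL timeval
		 * (VA_UTIMES_NULL) gets by with write permission.
		 */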
		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, ap->a_td)) &&
		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
		    (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, ap->a_td))))
			return (error);
		if (vap->va_atime.tv_sec != VNOVAL) {
			if (vp->v_type == VCHR)
				vp->v_rdev->si_atime = vap->va_atime;
			else
				de->de_atime = vap->va_atime;
		}
		if (vap->va_mtime.tv_sec != VNOVAL) {
			if (vp->v_type == VCHR)
				vp->v_rdev->si_mtime = vap->va_mtime;
			else
				de->de_mtime = vap->va_mtime;
		}
		c = 1;
	}

	if (c) {
		if (vp->v_type == VCHR)
			vfs_timestamp(&vp->v_rdev->si_ctime);
		else
			vfs_timestamp(&de->de_mtime);
	}
	return (0);
}

#ifdef MAC
static int
devfs_setlabel(ap)
	struct vop_setlabel_args /* {
		struct vnode *a_vp;
		struct mac *a_label;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp;
	struct devfs_dirent *de;

	vp = ap->a_vp;
	de = vp->v_data;

	mac_relabel_vnode(ap->a_cred, vp, ap->a_label);
	mac_update_devfsdirent(vp->v_mount, de, vp);

	return (0);
}
#endif

static int
devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred, struct thread *td)
{

	return (vnops.fo_stat(fp, sb, cred, td));
}

static int
devfs_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap;
{
	int i, error;
	struct devfs_dirent *dd;
	struct devfs_dirent *de;
	struct devfs_mount *dmp;
	struct thread *td;

	td = ap->a_cnp->cn_thread;
	KASSERT(td == curthread, ("devfs_symlink: td != curthread"));
	error = suser(td);
	if (error)
		return (error);
	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
	dd = ap->a_dvp->v_data;
	de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen);
	de->de_uid = 0;
	de->de_gid = 0;
	de->de_mode = 0755;
	de->de_inode = dmp->dm_inode++;
	de->de_dirent->d_type = DT_LNK;
	i = strlen(ap->a_target) + 1;
	MALLOC(de->de_symlink, char *, i, M_DEVFS, M_WAITOK);
	bcopy(ap->a_target, de->de_symlink, i);
	lockmgr(&dmp->dm_lock, LK_EXCLUSIVE, 0, td);
#ifdef MAC
	mac_create_devfs_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de);
#endif
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	devfs_allocv(de, ap->a_dvp->v_mount, ap->a_vpp, td);
	lockmgr(&dmp->dm_lock, LK_RELEASE, 0, td);
	return (0);
}

/*
 * Vnode op for write
 */
/* ARGSUSED */
static int
devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
{
	struct cdev *dev;
	struct vnode *vp;
	int error, ioflag, resid;
	struct cdevsw *dsw;

	error = devfs_fp_check(fp, &dev, &dsw);
	if (error)
		return (error);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
	vp = fp->f_vnode;
	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC);
	if (ioflag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	resid = uio->uio_resid;

	if (dsw->d_flags & D_NEEDGIANT)
		mtx_lock(&Giant);
	error = dsw->d_write(dev, uio, ioflag);
	if (dsw->d_flags & D_NEEDGIANT)
		mtx_unlock(&Giant);
	dev_relthread(dev);
	if (uio->uio_resid != resid || (error == 0 && resid != 0)) {
		vfs_timestamp(&dev->si_ctime);
		dev->si_mtime = dev->si_ctime;
	}

	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	return (error);
}

dev_t
dev2udev(struct cdev *x)
{
	if (x == NULL)
		return (NODEV);
	return (x->si_inode ^ devfs_random());
}

/*
 * Helper sysctl for devname(3).  We're given a struct cdev * and return
 * the name, if any, registered by the device driver.
 */
static int
sysctl_devname(SYSCTL_HANDLER_ARGS)
{
	int error;
	dev_t ud;
	struct cdev *dev, **dp;

	error = SYSCTL_IN(req, &ud, sizeof (ud));
	if (error)
		return (error);
	if (ud == NODEV)
		return (EINVAL);
	dp = devfs_itod(ud ^ devfs_random());
	if (dp == NULL)
		return (ENOENT);
	dev = *dp;
	if (dev == NULL)
		return (ENOENT);
	return (SYSCTL_OUT(req, dev->si_name, strlen(dev->si_name) + 1));
}

SYSCTL_PROC(_kern, OID_AUTO, devname, CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY,
    NULL, 0, sysctl_devname, "", "devname(3) handler");

/*
 * Our calling convention to the device drivers used to be that we passed
 * vnode.h IO_* flags to read()/write(), but we're moving to fcntl.h O_
 * flags instead since that's what open(), close() and ioctl() take and
 * we don't really want vnode.h in device drivers.
 * We solved the source compatibility by redefining some vnode flags to
 * be the same as the fcntl ones and by sending down the bitwise OR of
 * the respective fcntl/vnode flags.  These CTASSERTs make sure nobody
 * pulls the rug out from under this.
 */
CTASSERT(O_NONBLOCK == IO_NDELAY);
CTASSERT(O_FSYNC == IO_SYNC);
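
/*
 * Illustrative userspace counterpart to the kern.devname sysctl above.
 * This is only a sketch, not part of this file; devname(3) in libc is the
 * real consumer.  It assumes the contract implemented by sysctl_devname():
 * a dev_t is passed in as the "new" value and a NUL-terminated driver name
 * comes back as the "old" value.
 *
 *	#include <sys/types.h>
 *	#include <sys/stat.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct stat st;
 *		char name[256];
 *		size_t len = sizeof(name);
 *
 *		if (stat("/dev/null", &st) != 0)
 *			return (1);
 *		if (sysctlbyname("kern.devname", name, &len,
 *		    &st.st_rdev, sizeof(st.st_rdev)) != 0)
 *			return (1);
 *		printf("%s\n", name);
 *		return (0);
 *	}
 */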