/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/sx.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

#include <machine/limits.h>

static int vn_closefile(struct file *fp, struct thread *td);
static int vn_ioctl(struct file *fp, u_long com, caddr_t data,
	    struct thread *td);
static int vn_read(struct file *fp, struct uio *uio,
	    struct ucred *cred, int flags, struct thread *td);
static int vn_poll(struct file *fp, int events, struct ucred *cred,
	    struct thread *td);
static int vn_kqfilter(struct file *fp, struct knote *kn);
static int vn_statfile(struct file *fp, struct stat *sb, struct thread *td);
static int vn_write(struct file *fp, struct uio *uio,
	    struct ucred *cred, int flags, struct thread *td);

struct fileops vnops = {
	vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
	vn_statfile, vn_closefile
};

int
vn_open(ndp, flagp, cmode)
	register struct nameidata *ndp;
	int *flagp, cmode;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred));
}
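
/*
 * Example (illustrative sketch only, not part of the original file): a
 * typical in-kernel caller initializes a nameidata with NDINIT(), opens
 * with vn_open(), and balances the open with vn_close().  On success the
 * vnode is returned locked, so it is unlocked before extended use.  The
 * function name and path argument below are hypothetical.
 */
#if 0
static int
example_vn_open(struct thread *td, const char *path)
{
	struct nameidata nd;
	int flags, error;

	flags = FREAD;
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, td);
	if ((error = vn_open(&nd, &flags, 0)) != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	VOP_UNLOCK(nd.ni_vp, 0, td);
	/* ... use nd.ni_vp ... */
	return (vn_close(nd.ni_vp, FREAD, td->td_ucred, td));
}
#endif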

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(ndp, flagp, cmode, cred)
	register struct nameidata *ndp;
	int *flagp, cmode;
	struct ucred *cred;
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, fmode, error;
#ifndef LOOKUP_EXCLUSIVE
	int exclusive;		/* The current intended lock state */

	exclusive = 0;
#endif

restart:
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
			VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
			    &ndp->ni_cnd, vap);
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
#ifndef LOOKUP_EXCLUSIVE
			exclusive = 1;
#endif
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
#ifndef LOOKUP_EXCLUSIVE
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
		    LOCKSHARED | LOCKLEAF;
#else
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
#endif
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp);
			if (error)
				goto bad;
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, td);
			if (error)
				goto bad;
		}
	}
	if ((error = VOP_OPEN(vp, fmode, cred, td)) != 0)
		goto bad;
	/*
	 * Make sure that a VM object is created for VMIO support.
	 */
	if (vn_canvmio(vp) == TRUE) {
#ifndef LOOKUP_EXCLUSIVE
		int flock;

		if (!exclusive && VOP_GETVOBJECT(vp, NULL) != 0)
			VOP_LOCK(vp, LK_UPGRADE, td);
		/*
		 * In cases where the object is marked as dead object_create
		 * will unlock and relock exclusive.  It is safe to call in
		 * here with a shared lock because we only examine fields that
		 * the shared lock guarantees will be stable.  In the UPGRADE
		 * case it is not likely that anyone has used this vnode yet
		 * so there will be no contention.  The logic after this call
		 * restores the requested locking state.
		 */
#endif
		if ((error = vfs_object_create(vp, td, cred)) != 0) {
			VOP_UNLOCK(vp, 0, td);
			VOP_CLOSE(vp, fmode, cred, td);
			NDFREE(ndp, NDF_ONLY_PNBUF);
			vrele(vp);
			*flagp = fmode;
			return (error);
		}
#ifndef LOOKUP_EXCLUSIVE
		flock = VOP_ISLOCKED(vp, td);
		if (!exclusive && flock == LK_EXCLUSIVE)
			VOP_LOCK(vp, LK_DOWNGRADE, td);
#endif
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	*flagp = fmode;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct thread *td;
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, cred, td);
	/*
	 * XXX - In certain instances VOP_CLOSE has to do the vrele
	 * itself.  If the vrele has been done, it will return EAGAIN
	 * to indicate that the vrele should not be done again.  When
	 * this happens, we just return success.  The correct thing to
	 * do would be to have all VOP_CLOSE instances do the vrele.
	 */
	if (error == EAGAIN)
		return (0);
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic - detect sequential operation
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (fp->f_seqcount >= 127)
			fp->f_seqcount = 127;
		return (fp->f_seqcount << 16);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}
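
/*
 * Worked example (editorial note, not in the original): with the stock
 * BKVASIZE of 16K, a sequential 8K read bumps f_seqcount by
 * (8192 + 16383) / 16384 = 1.  The count saturates at 127, so the hint
 * returned in the upper bits of the ioflag word never exceeds 127 << 16;
 * the filesystem's read path may use it to scale read-ahead.
 */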

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct thread *td;
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		mp = NULL;
		if (rw == UIO_WRITE &&
		    vp->v_type != VCHR &&
		    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct thread *td;
{
	int error = 0;

	do {
		int chunk = (len > MAXBSIZE) ? MAXBSIZE : len;

		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, cred, aresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
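
/*
 * Example (illustrative sketch only, not part of the original file): a
 * typical caller reads a header from an already-referenced vnode into a
 * kernel buffer.  Passing IO_NODELOCKED would mean the caller holds the
 * vnode lock; here we let vn_rdwr() do the locking.  The names below are
 * hypothetical.
 */
#if 0
static int
example_read_header(struct vnode *vp, void *hdr, int hdrlen,
    struct ucred *cred, struct thread *td)
{
	int resid, error;

	error = vn_rdwr(UIO_READ, vp, (caddr_t)hdr, hdrlen, (off_t)0,
	    UIO_SYSSPACE, 0, cred, &resid, td);
	if (error == 0 && resid != 0)
		error = EIO;	/* short read */
	return (error);
}
#endif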

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	int error, ioflag;

	mtx_lock(&Giant);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	VOP_LEASE(vp, td, cred, LEASE_READ);
	vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	struct mount *mp;
	int error, ioflag;

	mtx_lock(&Giant);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
		mtx_unlock(&Giant);
		return (error);
	}
	VOP_LEASE(vp, td, cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	mtx_unlock(&Giant);
	return (error);
}
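
/*
 * Editorial note (not in the original): the FOF_OFFSET flag is how
 * positioned I/O is distinguished here.  For plain read(2)/write(2) the
 * caller leaves FOF_OFFSET clear, so the routines above load uio_offset
 * from fp->f_offset and store it back afterwards; for pread(2)/pwrite(2)
 * the caller presets uio_offset and passes FOF_OFFSET, leaving the file
 * offset untouched.
 */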

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, td)
	struct file *fp;
	struct stat *sb;
	struct thread *td;
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = vn_stat(vp, sb, td);
	VOP_UNLOCK(vp, 0, td);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(vp, sb, td)
	struct vnode *vp;
	register struct stat *sb;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, td->td_ucred, td);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare[0] = 0;
	sb->st_qspare[1] = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * This is a cosmetic change, symlinks do not have a mode.
		 * Note that we must adjust "mode" here, not sb->st_mode:
		 * st_mode is assigned from "mode" below, so modifying it
		 * here would be a dead store.
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file".
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		sb->st_blksize = vp->v_rdev->si_bsize_best;
		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
			sb->st_blksize = vp->v_rdev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser(td))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, td)
	struct file *fp;
	u_long com;
	caddr_t data;
	struct thread *td;
{
	register struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *vpold;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
			VOP_UNLOCK(vp, 0, td);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */

	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK)
				return (ENOTTY);
			*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
			return (0);
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, td->td_ucred, td);
		if (error == 0 && com == TIOCSCTTY) {

			/* Do nothing if reassigning same control tty */
			sx_slock(&proctree_lock);
			if (td->td_proc->p_session->s_ttyvp == vp) {
				sx_sunlock(&proctree_lock);
				return (0);
			}

			vpold = td->td_proc->p_session->s_ttyvp;
			VREF(vp);
			SESS_LOCK(td->td_proc->p_session);
			td->td_proc->p_session->s_ttyvp = vp;
			SESS_UNLOCK(td->td_proc->p_session);

			sx_sunlock(&proctree_lock);

			/* Get rid of reference to old control tty */
			if (vpold)
				vrele(vpold);
		}
		return (error);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, cred, td)
	struct file *fp;
	int events;
	struct ucred *cred;
	struct thread *td;
{

	return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, td));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(vp, flags, td)
#else
debug_vn_lock(vp, flags, td, filename, line)
#endif
	struct vnode *vp;
	int flags;
	struct thread *td;
#ifdef	DEBUG_LOCKS
	const char *filename;
	int line;
#endif
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			mtx_lock(&vp->v_interlock);
		if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curthread) {
			vp->v_flag |= VXWANT;
			msleep(vp, &vp->v_interlock, PINOD | PDROP,
			    "vn_lock", 0);
			error = ENOENT;
		} else {
#if 0
			/* this can now occur in normal operation */
			if (vp->v_vxproc != NULL)
				log(LOG_INFO, "VXLOCK interlock avoided in vn_lock\n");
#endif
#ifdef	DEBUG_LOCKS
			vp->filename = filename;
			vp->line = line;
#endif
			error = VOP_LOCK(vp,
			    flags | LK_NOPAUSE | LK_INTERLOCK, td);
			if (error == 0)
				return (error);
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}
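
/*
 * Usage note (editorial, not in the original): with LK_RETRY the loop
 * above keeps trying until the lock is acquired, so such callers may
 * assume success.  Without LK_RETRY, a vnode being reclaimed (VXLOCK
 * set) yields ENOENT and the caller must be prepared to back out, as in
 * this hypothetical sketch:
 */
#if 0
static int
example_lock_vnode(struct vnode *vp, struct thread *td)
{

	if (vn_lock(vp, LK_EXCLUSIVE, td) != 0) {
		/* vnode was reclaimed while we waited; drop our ref */
		vrele(vp);
		return (ENOENT);
	}
	/* ... operate on the locked vnode ... */
	VOP_UNLOCK(vp, 0, td);
	return (0);
}
#endif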

/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{

	fp->f_ops = &badfileops;
	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
	    fp->f_cred, td));
}

/*
 * Preparing to start a filesystem write operation.  If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed.  If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);
	/*
	 * Check on status of suspension.
	 */
	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		if (flags & V_NOWAIT)
			return (EWOULDBLOCK);
		error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
		    "suspfs", 0);
		if (error)
			return (error);
	}
	if (flags & V_XSLEEP)
		return (0);
	mp->mnt_writeopcount++;
	return (0);
}
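
/*
 * Example (editorial sketch, mirroring the pattern used by vn_write()
 * above): every write path brackets the actual VOP_WRITE() with
 * vn_start_write()/vn_finished_write() so that filesystem suspension
 * can drain writers.  The function name below is hypothetical.
 */
#if 0
static int
example_suspendable_write(struct vnode *vp, struct uio *uio, int ioflag,
    struct ucred *cred, struct thread *td)
{
	struct mount *mp;
	int error;

	mp = NULL;
	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		return (error);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	return (error);
}
#endif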

/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_write_suspend_wait(vp, mp, flags)
	struct vnode *vp;
	struct mount *mp;
	int flags;
{
	int error;

	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
		return (0);
	if (flags & V_NOWAIT)
		return (EWOULDBLOCK);
	/*
	 * Wait for the suspension to finish.
	 */
	return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
	    "suspfs", 0));
}

/*
 * Filesystem write operation has completed.  If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{

	if (mp == NULL)
		return;
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
}

/*
 * Request a filesystem to suspend write operations.
 */
void
vfs_write_suspend(mp)
	struct mount *mp;
{
	struct thread *td = curthread;

	if (mp->mnt_kern_flag & MNTK_SUSPEND)
		return;
	mp->mnt_kern_flag |= MNTK_SUSPEND;
	if (mp->mnt_writeopcount > 0)
		(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
	VFS_SYNC(mp, MNT_WAIT, td->td_ucred, td);
	mp->mnt_kern_flag |= MNTK_SUSPENDED;
}

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(mp)
	struct mount *mp;
{

	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
		return;
	mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
	wakeup(&mp->mnt_writeopcount);
	wakeup(&mp->mnt_flag);
}

/*
 * Implement kqueue support for files by translating the request into
 * a vnode operation.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(((struct vnode *)fp->f_data), kn));
}
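
/*
 * Example (editorial sketch, not in the original): a snapshot-style
 * consumer quiesces a filesystem by suspending writes, doing its work
 * while the filesystem is stable, and then resuming.  The function name
 * below is hypothetical.
 */
#if 0
static void
example_snapshot(struct mount *mp)
{

	vfs_write_suspend(mp);
	/* writers are drained and MNTK_SUSPENDED is set */
	/* ... capture or examine stable on-disk state ... */
	vfs_write_resume(mp);
}
#endif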

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * These calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, td);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}

/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute removal as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}
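
/*
 * Example (illustrative sketch only, not part of the original file):
 * retrieving an extended attribute into a stack buffer.  On return,
 * buflen is updated from an "in" size to the number of bytes actually
 * read.  The attribute name is hypothetical; EXTATTR_NAMESPACE_SYSTEM
 * comes from <sys/extattr.h>, which this file does not itself include.
 */
#if 0
static int
example_get_attr(struct vnode *vp, struct thread *td)
{
	char buf[64];
	int buflen, error;

	buflen = sizeof(buf);	/* in: buffer size; out: bytes read */
	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
	    "example.attr", &buflen, buf, td);
	return (error);
}
#endif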