/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/sx.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

static fo_rdwr_t	vn_read;
static fo_rdwr_t	vn_write;
static fo_ioctl_t	vn_ioctl;
static fo_poll_t	vn_poll;
static fo_kqfilter_t	vn_kqfilter;
static fo_stat_t	vn_statfile;
static fo_close_t	vn_closefile;

struct fileops vnops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

int
vn_open(ndp, flagp, cmode, fdidx)
	struct nameidata *ndp;
	int *flagp, cmode, fdidx;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred, fdidx));
}

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
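 *
 * A typical in-kernel caller follows roughly the sequence sketched below;
 * the lookup flags, open mode and error handling are the caller's own, and
 * "path", "nd" and "flags" here are only placeholders:
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, td);
 *	flags = FREAD;
 *	if ((error = vn_open(&nd, &flags, 0, -1)) != 0)
 *		return (error);
 *	NDFREE(&nd, NDF_ONLY_PNBUF);
 *	... use nd.ni_vp, which is returned locked ...
 *	VOP_UNLOCK(nd.ni_vp, 0, td);
 *	vn_close(nd.ni_vp, FREAD, td->td_ucred, td);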
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(ndp, flagp, cmode, cred, fdidx)
	struct nameidata *ndp;
	int *flagp, cmode;
	struct ucred *cred;
	int fdidx;
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, fmode, error;
#ifdef LOOKUP_SHARED
	int exclusive;		/* The current intended lock state */

	exclusive = 0;
#endif

restart:
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
#ifdef MAC
			error = mac_check_vnode_create(cred, ndp->ni_dvp,
			    &ndp->ni_cnd, vap);
			if (error == 0) {
#endif
				VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
				    &ndp->ni_cnd, vap);
#ifdef MAC
			}
#endif
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
#ifdef LOOKUP_SHARED
			exclusive = 1;
#endif
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
#ifdef LOOKUP_SHARED
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
		    LOCKSHARED | LOCKLEAF;
#else
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
#endif
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	mode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		mode |= VWRITE;
	}
	if (fmode & FREAD)
		mode |= VREAD;
	if (fmode & O_APPEND)
		mode |= VAPPEND;
#ifdef MAC
	error = mac_check_vnode_open(cred, vp, mode);
	if (error)
		goto bad;
#endif
	if ((fmode & O_CREAT) == 0) {
		if (mode & VWRITE) {
			error = vn_writechk(vp);
			if (error)
				goto bad;
		}
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, td);
			if (error)
				goto bad;
		}
	}
	if ((error = VOP_GETATTR(vp, vap, cred, td)) == 0) {
		vp->v_cachedfs = vap->va_fsid;
		vp->v_cachedid = vap->va_fileid;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, td, fdidx)) != 0)
		goto bad;
	/*
	 * Make sure that a VM object is created for VMIO support.
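	 * The VM object is what allows the vnode's pages to be shared
	 * between the buffer cache and the VM system (the merged cache);
	 * vn_canvmio() reports whether this vnode type can use VMIO at all.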
	 */
	if (vn_canvmio(vp) == TRUE) {
#ifdef LOOKUP_SHARED
		int flock;

		if (!exclusive && VOP_GETVOBJECT(vp, NULL) != 0)
			VOP_LOCK(vp, LK_UPGRADE, td);
		/*
		 * In cases where the object is marked as dead object_create
		 * will unlock and relock exclusive.  It is safe to call in
		 * here with a shared lock because we only examine fields that
		 * the shared lock guarantees will be stable.  In the UPGRADE
		 * case it is not likely that anyone has used this vnode yet
		 * so there will be no contention.  The logic after this call
		 * restores the requested locking state.
		 */
#endif
		if ((error = vfs_object_create(vp, td, cred)) != 0) {
			VOP_UNLOCK(vp, 0, td);
			VOP_CLOSE(vp, fmode, cred, td);
			NDFREE(ndp, NDF_ONLY_PNBUF);
			vrele(vp);
			*flagp = fmode;
			return (error);
		}
#ifdef LOOKUP_SHARED
		flock = VOP_ISLOCKED(vp, td);
		if (!exclusive && flock == LK_EXCLUSIVE)
			VOP_LOCK(vp, LK_DOWNGRADE, td);
#endif
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	*flagp = fmode;
	ASSERT_VOP_LOCKED(vp, "vn_open_cred");
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	ndp->ni_vp = NULL;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_vflag & VV_TEXT)
		return (ETXTBSY);

	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, file_cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *file_cred;
	struct thread *td;
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, file_cred, td);
	/*
	 * XXX - In certain instances VOP_CLOSE has to do the vrele
	 * itself.  If the vrele has been done, it will return EAGAIN
	 * to indicate that the vrele should not be done again.  When
	 * this happens, we just return success.  The correct thing to
	 * do would be to have all VOP_CLOSE instances do the vrele.
	 */
	if (error == EAGAIN)
		return (0);
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic - detect sequential operation
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (fp->f_seqcount > IO_SEQMAX)
			fp->f_seqcount = IO_SEQMAX;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
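 *
 * A sketch of a typical call, reading "len" bytes into a kernel buffer
 * from a vnode that is not yet locked ("buf", "len", "offset" and "resid"
 * are placeholders):
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, len, offset,
 *	    UIO_SYSSPACE, 0, td->td_ucred, NOCRED, &resid, td);
 *
 * Passing IO_NODELOCKED in "ioflg" instead tells vn_rdwr() that the caller
 * already holds the vnode lock.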
 */
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
    aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	int *aresid;
	struct thread *td;
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				return (error);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		} else {
			/*
			 * XXX This should be LK_SHARED but I don't trust VFS
			 * enough to leave it like that until it has been
			 * reviewed further.
			 */
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		}

	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_check_vnode_read(active_cred, file_cred,
			    vp);
		else
			error = mac_check_vnode_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred)
			cred = file_cred;
		else
			cred = active_cred;
		if (rw == UIO_READ)
			error = VOP_READ(vp, &auio, ioflg, cred);
		else
			error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_WRITE)
			vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * call bwillwrite() before each chunk's vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
    file_cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	int *aresid;
	struct thread *td;
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, aresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

/*
 * File table vnode read routine.
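 *
 * This is the fo_read member of vnops and is reached through fo_read() on
 * a vnode-backed struct file.  Unless FOF_OFFSET is set in "flags", the
 * file's own f_offset supplies the starting offset and is updated from the
 * uio when the read completes.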
 */
static int
vn_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	int error, ioflag;

	mtx_lock(&Giant);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	VOP_LEASE(vp, td, fp->f_cred, LEASE_READ);
	/*
	 * According to McKusick the vn lock is protecting f_offset here.
	 * Once this field has its own lock we can acquire it shared.
	 */
	if ((flags & FOF_OFFSET) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_NOPAUSE | LK_RETRY, td);
		uio->uio_offset = fp->f_offset;
	} else
		vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);

	ioflag |= sequential_heuristic(uio, fp);

#ifdef MAC
	error = mac_check_vnode_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	struct mount *mp;
	int error, ioflag;

	mtx_lock(&Giant);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = fp->f_vnode;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
		mtx_unlock(&Giant);
		return (error);
	}
	VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
#ifdef MAC
	error = mac_check_vnode_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, active_cred, td)
	struct file *fp;
	struct stat *sb;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp, 0, td);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(vp, sb, active_cred, file_cred, td)
	struct vnode *vp;
	register struct stat *sb;
	struct ucred *active_cred;
	struct ucred *file_cred;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

#ifdef MAC
	error = mac_check_vnode_stat(active_cred, file_cred, vp);
	if (error)
		return (error);
#endif

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, active_cred, td);
	if (error)
		return (error);

	vp->v_cachedfs = vap->va_fsid;
	vp->v_cachedid = vap->va_fileid;

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			sb->st_mode &= ~ACCESSPERMS;	/* 0000 */
		else
			sb->st_mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;
	sb->st_birthtimespec = vap->va_birthtime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		sb->st_blksize = vp->v_rdev->si_bsize_best;
		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
			sb->st_blksize = vp->v_rdev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser(td))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	return (0);
}

/*
 * File table vnode ioctl routine.
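 *
 * FIONREAD on regular files and directories is answered from the vnode's
 * attributes, FIODTYPE is only meaningful for devices, and everything else
 * is handed to VOP_IOCTL(), with ENOIOCTL from the filesystem translated
 * into the ENOTTY that callers expect.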
 */
static int
vn_ioctl(fp, com, data, active_cred, td)
	struct file *fp;
	u_long com;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	struct vnode *vpold;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			error = VOP_GETATTR(vp, &vattr, active_cred, td);
			VOP_UNLOCK(vp, 0, td);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* FALLTHROUGH */

	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK)
				return (ENOTTY);
			*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
			return (0);
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, active_cred, td);
		if (error == ENOIOCTL) {
#ifdef DIAGNOSTIC
			Debugger("ENOIOCTL leaked through");
#endif
			error = ENOTTY;
		}
		if (error == 0 && com == TIOCSCTTY) {

			/* Do nothing if reassigning same control tty */
			sx_slock(&proctree_lock);
			if (td->td_proc->p_session->s_ttyvp == vp) {
				sx_sunlock(&proctree_lock);
				return (0);
			}

			vpold = td->td_proc->p_session->s_ttyvp;
			VREF(vp);
			SESS_LOCK(td->td_proc->p_session);
			td->td_proc->p_session->s_ttyvp = vp;
			SESS_UNLOCK(td->td_proc->p_session);

			sx_sunlock(&proctree_lock);

			/* Get rid of reference to old control tty */
			if (vpold)
				vrele(vpold);
		}
		return (error);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp;
#ifdef MAC
	int error;
#endif

	vp = fp->f_vnode;
#ifdef MAC
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = mac_check_vnode_poll(active_cred, fp->f_cred, vp);
	VOP_UNLOCK(vp, 0, td);
	if (error)
		return (error);
#endif

	return (VOP_POLL(vp, events, fp->f_cred, td));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire the requested lock.
 */
int
#ifndef DEBUG_LOCKS
vn_lock(vp, flags, td)
#else
debug_vn_lock(vp, flags, td, filename, line)
#endif
	struct vnode *vp;
	int flags;
	struct thread *td;
#ifdef DEBUG_LOCKS
	const char *filename;
	int line;
#endif
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			VI_LOCK(vp);
		if ((vp->v_iflag & VI_XLOCK) && vp->v_vxthread != curthread) {
			if ((flags & LK_NOWAIT) != 0) {
				VI_UNLOCK(vp);
				return (ENOENT);
			}
			vp->v_iflag |= VI_XWANT;
			msleep(vp, VI_MTX(vp), PINOD, "vn_lock", 0);
			if ((flags & LK_RETRY) == 0) {
				VI_UNLOCK(vp);
				return (ENOENT);
			}
		}
#ifdef DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
#endif
		/*
		 * lockmgr drops interlock before it will return for
		 * any reason.  So force the code above to relock it.
		 */
		error = VOP_LOCK(vp, flags | LK_NOPAUSE | LK_INTERLOCK, td);
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY && error != 0);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{

	fp->f_ops = &badfileops;
	return (vn_close(fp->f_vnode, fp->f_flag, fp->f_cred, td));
}

/*
 * Prepare to start a filesystem write operation.  If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed.  If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);
	/*
	 * Check on status of suspension.
	 */
	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		if (flags & V_NOWAIT)
			return (EWOULDBLOCK);
		error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
		    "suspfs", 0);
		if (error)
			return (error);
	}
	if (flags & V_XSLEEP)
		return (0);
	mp->mnt_writeopcount++;
	return (0);
}

/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_write_suspend_wait(vp, mp, flags)
	struct vnode *vp;
	struct mount *mp;
	int flags;
{
	int error;

	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
		return (0);
	if (flags & V_NOWAIT)
		return (EWOULDBLOCK);
	/*
	 * Wait for the suspension to finish.
	 */
	return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
	    "suspfs", 0));
}

/*
 * Filesystem write operation has completed.  If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{

	if (mp == NULL)
		return;
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
}

/*
 * Request a filesystem to suspend write operations.
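 *
 * A sketch of the intended pairing (the work done while the filesystem is
 * quiescent is the caller's own, as in the snapshot code):
 *
 *	if ((error = vfs_write_suspend(mp)) != 0)
 *		return (error);
 *	... operate on the now-quiescent filesystem ...
 *	vfs_write_resume(mp);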
 */
int
vfs_write_suspend(mp)
	struct mount *mp;
{
	struct thread *td = curthread;
	int error;

	if (mp->mnt_kern_flag & MNTK_SUSPEND)
		return (0);
	mp->mnt_kern_flag |= MNTK_SUSPEND;
	if (mp->mnt_writeopcount > 0)
		(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
	if ((error = VFS_SYNC(mp, MNT_WAIT, td->td_ucred, td)) != 0) {
		vfs_write_resume(mp);
		return (error);
	}
	mp->mnt_kern_flag |= MNTK_SUSPENDED;
	return (0);
}

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(mp)
	struct mount *mp;
{

	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
		return;
	mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
	wakeup(&mp->mnt_writeopcount);
	wakeup(&mp->mnt_flag);
}

/*
 * Implement kqueues for files by translating the request into the
 * corresponding vnode operation.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(fp->f_vnode, kn));
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * These calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, td);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}

/*
 * XXX failure mode if partially written?
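 *
 * A sketch of setting a system-namespace attribute on a vnode the caller
 * has already locked; "myattr", "buf" and "buflen" are placeholders:
 *
 *	error = vn_extattr_set(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
 *	    "myattr", buflen, buf, td);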
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute removal as kernel */
	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}