/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/sx.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

static fo_rdwr_t	vn_read;
static fo_rdwr_t	vn_write;
static fo_ioctl_t	vn_ioctl;
static fo_poll_t	vn_poll;
static fo_kqfilter_t	vn_kqfilter;
static fo_stat_t	vn_statfile;
static fo_close_t	vn_closefile;

struct fileops vnops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

int
vn_open(ndp, flagp, cmode, fdidx)
	struct nameidata *ndp;
	int *flagp, cmode, fdidx;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred, fdidx));
}
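
/*
 * Illustrative sketch (not part of the original file): callers normally
 * drive vn_open() through the namei machinery.  The path string, the
 * thread pointer `td', and the surrounding error handling below are
 * hypothetical placeholders.
 */
#if 0
	struct nameidata nd;
	int flags, error;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/a/path", td);
	flags = FFLAGS(O_RDONLY);		/* map O_* flags to FREAD/FWRITE */
	error = vn_open(&nd, &flags, 0, -1);	/* -1: not bound to a descriptor */
	if (error == 0) {
		NDFREE(&nd, NDF_ONLY_PNBUF);	/* vn_open does not free the path buffer */
		/* nd.ni_vp is returned locked; use it, then unlock and close. */
		VOP_UNLOCK(nd.ni_vp, 0, td);
		error = vn_close(nd.ni_vp, FREAD, td->td_ucred, td);
	}
#endif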
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(ndp, flagp, cmode, cred, fdidx)
	struct nameidata *ndp;
	int *flagp, cmode;
	struct ucred *cred;
	int fdidx;
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, fmode, error;
#ifdef LOOKUP_SHARED
	int exclusive;		/* The current intended lock state */

	exclusive = 0;
#endif

restart:
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
#ifdef MAC
			error = mac_check_vnode_create(cred, ndp->ni_dvp,
			    &ndp->ni_cnd, vap);
			if (error == 0) {
#endif
				VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
				    &ndp->ni_cnd, vap);
#ifdef MAC
			}
#endif
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
#ifdef LOOKUP_SHARED
			exclusive = 1;
#endif
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
#ifdef LOOKUP_SHARED
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
		    LOCKSHARED | LOCKLEAF;
#else
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
#endif
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	mode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		mode |= VWRITE;
	}
	if (fmode & FREAD)
		mode |= VREAD;
	if (fmode & O_APPEND)
		mode |= VAPPEND;
#ifdef MAC
	error = mac_check_vnode_open(cred, vp, mode);
	if (error)
		goto bad;
#endif
	if ((fmode & O_CREAT) == 0) {
		if (mode & VWRITE) {
			error = vn_writechk(vp);
			if (error)
				goto bad;
		}
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, td);
			if (error)
				goto bad;
		}
	}
	if ((error = VOP_GETATTR(vp, vap, cred, td)) == 0) {
		vp->v_cachedfs = vap->va_fsid;
		vp->v_cachedid = vap->va_fileid;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, td, fdidx)) != 0)
		goto bad;
	/*
	 * Make sure that a VM object is created for VMIO support.
	 */
	if (vn_canvmio(vp) == TRUE) {
#ifdef LOOKUP_SHARED
		int flock;

		if (!exclusive && VOP_GETVOBJECT(vp, NULL) != 0)
			VOP_LOCK(vp, LK_UPGRADE, td);
		/*
		 * In cases where the object is marked as dead object_create
		 * will unlock and relock exclusive.  It is safe to call in
		 * here with a shared lock because we only examine fields that
		 * the shared lock guarantees will be stable.  In the UPGRADE
		 * case it is not likely that anyone has used this vnode yet
		 * so there will be no contention.  The logic after this call
		 * restores the requested locking state.
		 */
#endif
		if ((error = vfs_object_create(vp, td, cred)) != 0) {
			VOP_UNLOCK(vp, 0, td);
			VOP_CLOSE(vp, fmode, cred, td);
			NDFREE(ndp, NDF_ONLY_PNBUF);
			vrele(vp);
			*flagp = fmode;
			return (error);
		}
#ifdef LOOKUP_SHARED
		flock = VOP_ISLOCKED(vp, td);
		if (!exclusive && flock == LK_EXCLUSIVE)
			VOP_LOCK(vp, LK_DOWNGRADE, td);
#endif
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	*flagp = fmode;
	ASSERT_VOP_LOCKED(vp, "vn_open_cred");
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	ndp->ni_vp = NULL;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_vflag & VV_TEXT)
		return (ETXTBSY);

	return (0);
}
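
/*
 * Illustrative sketch (an assumption, mirroring the checks vn_open_cred()
 * itself performs above): a caller upgrading an existing open to write
 * access would re-run vn_writechk() and VOP_ACCESS() under the vnode lock.
 */
#if 0
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = vn_writechk(vp);	/* ETXTBSY if VV_TEXT is set */
	if (error == 0)
		error = VOP_ACCESS(vp, VWRITE, cred, td);
	VOP_UNLOCK(vp, 0, td);
#endif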
/*
 * Vnode close call
 */
int
vn_close(vp, flags, file_cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *file_cred;
	struct thread *td;
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, file_cred, td);
	/*
	 * XXX - In certain instances VOP_CLOSE has to do the vrele
	 * itself.  If the vrele has been done, it will return EAGAIN
	 * to indicate that the vrele should not be done again.  When
	 * this happens, we just return success.  The correct thing to
	 * do would be to have all VOP_CLOSE instances do the vrele.
	 */
	if (error == EAGAIN)
		return (0);
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic - detect sequential operation
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (fp->f_seqcount > IO_SEQMAX)
			fp->f_seqcount = IO_SEQMAX;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
    aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	int *aresid;
	struct thread *td;
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				return (error);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		} else {
			/*
			 * XXX This should be LK_SHARED but I don't trust VFS
			 * enough to leave it like that until it has been
			 * reviewed further.
			 */
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		}

	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_check_vnode_read(active_cred, file_cred,
			    vp);
		else
			error = mac_check_vnode_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred)
			cred = file_cred;
		else
			cred = active_cred;
		if (rw == UIO_READ)
			error = VOP_READ(vp, &auio, ioflg, cred);
		else
			error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_WRITE)
			vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}
	return (error);
}
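
/*
 * Illustrative sketch (hypothetical buffer and variable names): read the
 * first 512 bytes of a vnode into a kernel buffer.  With ioflg 0,
 * vn_rdwr() takes and releases the vnode lock itself.
 */
#if 0
	char buf[512];
	int resid, error;

	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), (off_t)0,
	    UIO_SYSSPACE, 0, td->td_ucred, NOCRED, &resid, td);
	/* On success, sizeof(buf) - resid bytes were actually read. */
#endif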
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
    file_cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	int *aresid;
	struct thread *td;
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.  For example, with MAXBSIZE == 65536 and
		 * offset == 70000, chunk is 65536 - 4464 == 61072, which
		 * advances the next offset to 131072, a MAXBSIZE boundary.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, aresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	int error, ioflag;

	mtx_lock(&Giant);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	VOP_LEASE(vp, td, fp->f_cred, LEASE_READ);
	/*
	 * According to McKusick the vn lock is protecting f_offset here.
	 * Once this field has its own lock we can acquire it shared.
	 */
	if ((flags & FOF_OFFSET) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_NOPAUSE | LK_RETRY, td);
		uio->uio_offset = fp->f_offset;
	} else
		vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);

	ioflag |= sequential_heuristic(uio, fp);

#ifdef MAC
	error = mac_check_vnode_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	mtx_unlock(&Giant);
	return (error);
}
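
/*
 * Sketch of the two ways fo_read() reaches vn_read() (an assumption for
 * illustration): read(2) uses and updates fp->f_offset, while pread(2)
 * passes FOF_OFFSET with uio_offset already set by the caller.
 */
#if 0
	error = fo_read(fp, &auio, td->td_ucred, 0, td);	  /* read(2) */
	error = fo_read(fp, &auio, td->td_ucred, FOF_OFFSET, td); /* pread(2) */
#endif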
/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	struct mount *mp;
	int error, ioflag;

	mtx_lock(&Giant);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = fp->f_vnode;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
		mtx_unlock(&Giant);
		return (error);
	}
	VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
#ifdef MAC
	error = mac_check_vnode_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, active_cred, td)
	struct file *fp;
	struct stat *sb;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp, 0, td);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(vp, sb, active_cred, file_cred, td)
	struct vnode *vp;
	register struct stat *sb;
	struct ucred *active_cred;
	struct ucred *file_cred;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

#ifdef MAC
	error = mac_check_vnode_stat(active_cred, file_cred, vp);
	if (error)
		return (error);
#endif

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, active_cred, td);
	if (error)
		return (error);

	vp->v_cachedfs = vap->va_fsid;
	vp->v_cachedid = vap->va_fileid;

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * This is a cosmetic change; symlinks do not have a mode,
		 * so fake one up here before st_mode is set from `mode'
		 * below.
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;
	sb->st_birthtimespec = vap->va_birthtime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 *  object.  In some filesystem types, this may vary from file
	 *  to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		sb->st_blksize = vp->v_rdev->si_bsize_best;
		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
			sb->st_blksize = vp->v_rdev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser(td))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	return (0);
}
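
/*
 * Worked example (illustrative, not in the original file): for a regular
 * file whose vattr carries va_type == VREG and va_mode == 0644, the
 * switch above composes st_mode = S_IFREG | 0644 = 0100644.
 */
#if 0
	mode = 0644;		/* vap->va_mode */
	mode |= S_IFREG;	/* 0100000 */
	sb->st_mode = mode;	/* 0100644 */
#endif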
/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, active_cred, td)
	struct file *fp;
	u_long com;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	struct vnode *vpold;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			error = VOP_GETATTR(vp, &vattr, active_cred, td);
			VOP_UNLOCK(vp, 0, td);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* FALLTHROUGH */

	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK)
				return (ENOTTY);
			*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
			return (0);
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, active_cred, td);
		if (error == ENOIOCTL) {
#ifdef DIAGNOSTIC
			Debugger("ENOIOCTL leaked through");
#endif
			error = ENOTTY;
		}
		if (error == 0 && com == TIOCSCTTY) {

			/* Do nothing if reassigning same control tty */
			sx_slock(&proctree_lock);
			if (td->td_proc->p_session->s_ttyvp == vp) {
				sx_sunlock(&proctree_lock);
				return (0);
			}

			vpold = td->td_proc->p_session->s_ttyvp;
			VREF(vp);
			SESS_LOCK(td->td_proc->p_session);
			td->td_proc->p_session->s_ttyvp = vp;
			SESS_UNLOCK(td->td_proc->p_session);

			sx_sunlock(&proctree_lock);

			/* Get rid of reference to old control tty */
			if (vpold)
				vrele(vpold);
		}
		return (error);
	}
}
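
/*
 * Illustrative sketch (hypothetical caller): FIONREAD on a regular file
 * reports the bytes between the current file offset and end of file.
 */
#if 0
	int nread, error;

	error = fo_ioctl(fp, FIONREAD, &nread, td->td_ucred, td);
	/* On success, nread == va_size - fp->f_offset. */
#endif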
/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp;
#ifdef MAC
	int error;
#endif

	vp = fp->f_vnode;
#ifdef MAC
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = mac_check_vnode_poll(active_cred, fp->f_cred, vp);
	VOP_UNLOCK(vp, 0, td);
	if (error)
		return (error);
#endif

	return (VOP_POLL(vp, events, fp->f_cred, td));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef DEBUG_LOCKS
vn_lock(vp, flags, td)
#else
debug_vn_lock(vp, flags, td, filename, line)
#endif
	struct vnode *vp;
	int flags;
	struct thread *td;
#ifdef DEBUG_LOCKS
	const char *filename;
	int line;
#endif
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			VI_LOCK(vp);
		if ((vp->v_iflag & VI_XLOCK) && vp->v_vxthread != curthread) {
			if ((flags & LK_NOWAIT) != 0) {
				VI_UNLOCK(vp);
				return (ENOENT);
			}
			vp->v_iflag |= VI_XWANT;
			msleep(vp, VI_MTX(vp), PINOD, "vn_lock", 0);
			if ((flags & LK_RETRY) == 0) {
				VI_UNLOCK(vp);
				return (ENOENT);
			}
		}
#ifdef DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
#endif
		/*
		 * lockmgr drops interlock before it will return for
		 * any reason.  So force the code above to relock it.
		 */
		error = VOP_LOCK(vp, flags | LK_NOPAUSE | LK_INTERLOCK, td);
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY && error != 0);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{

	fp->f_ops = &badfileops;
	return (vn_close(fp->f_vnode, fp->f_flag, fp->f_cred, td));
}

/*
 * Preparing to start a filesystem write operation.  If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed.  If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);
	/*
	 * Check on status of suspension.
	 */
	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		if (flags & V_NOWAIT)
			return (EWOULDBLOCK);
		error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
		    "suspfs", 0);
		if (error)
			return (error);
	}
	if (flags & V_XSLEEP)
		return (0);
	mp->mnt_writeopcount++;
	return (0);
}
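
/*
 * Illustrative sketch (mirroring vn_write() above): filesystem write
 * paths bracket the I/O with vn_start_write()/vn_finished_write() so
 * that a pending suspension can drain them.
 */
#if 0
	struct mount *mp;

	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		return (error);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = VOP_WRITE(vp, &auio, ioflag, cred);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
#endif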
/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_write_suspend_wait(vp, mp, flags)
	struct vnode *vp;
	struct mount *mp;
	int flags;
{
	int error;

	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
		return (0);
	if (flags & V_NOWAIT)
		return (EWOULDBLOCK);
	/*
	 * Wait for the suspension to finish.
	 */
	return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
	    "suspfs", 0));
}

/*
 * Filesystem write operation has completed.  If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{

	if (mp == NULL)
		return;
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
}

/*
 * Request a filesystem to suspend write operations.
 */
int
vfs_write_suspend(mp)
	struct mount *mp;
{
	struct thread *td = curthread;
	int error;

	if (mp->mnt_kern_flag & MNTK_SUSPEND)
		return (0);
	mp->mnt_kern_flag |= MNTK_SUSPEND;
	if (mp->mnt_writeopcount > 0)
		(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
	if ((error = VFS_SYNC(mp, MNT_WAIT, td->td_ucred, td)) != 0) {
		vfs_write_resume(mp);
		return (error);
	}
	mp->mnt_kern_flag |= MNTK_SUSPENDED;
	return (0);
}

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(mp)
	struct mount *mp;
{

	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
		return;
	mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
	wakeup(&mp->mnt_writeopcount);
	wakeup(&mp->mnt_flag);
}
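
/*
 * Illustrative sketch (an assumption): a snapshot-style consumer pairs
 * vfs_write_suspend() with vfs_write_resume() around its critical work.
 */
#if 0
	if ((error = vfs_write_suspend(mp)) == 0) {
		/* Writers are drained and the filesystem is synced. */
		/* ... take the snapshot here ... */
		vfs_write_resume(mp);
	}
#endif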
/*
 * Implement kqueues for files by translating them to vnode operations.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(fp->f_vnode, kn));
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, td);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}

/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute removal as kernel */
	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
	if (error == EOPNOTSUPP)
		/* fall back to setting a NULL uio, which deletes */
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}
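
/*
 * Illustrative sketch (the attribute name is hypothetical, and the
 * EXTATTR_NAMESPACE_* constants come from <sys/extattr.h>): fetch an
 * extended attribute into a stack buffer.  With ioflg 0, vn_extattr_get()
 * locks the vnode itself.
 */
#if 0
	char buf[64];
	int buflen = sizeof(buf), error;

	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
	    "posix1e.acl_access", &buflen, buf, td);
	/* On success, buflen holds the number of bytes actually returned. */
#endif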