/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kdb.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/sx.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/unistd.h>

static fo_rdwr_t	vn_read;
static fo_rdwr_t	vn_write;
static fo_ioctl_t	vn_ioctl;
static fo_poll_t	vn_poll;
static fo_kqfilter_t	vn_kqfilter;
static fo_stat_t	vn_statfile;
static fo_close_t	vn_closefile;

struct fileops vnops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

int
vn_open(ndp, flagp, cmode, fdidx)
	struct nameidata *ndp;
	int *flagp, cmode, fdidx;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred, fdidx));
}
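
/*
 * Illustrative sketch only (not part of this file's interfaces): a
 * typical in-kernel open of an existing file by pathname.  The path
 * string here is hypothetical; the core dump and ktrace writers are
 * real examples of this pattern.
 *
 *	struct nameidata nd;
 *	struct vnode *vp;
 *	int flags, error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/some/path", td);
 *	flags = FREAD;
 *	error = vn_open(&nd, &flags, 0, -1);
 *	if (error == 0) {
 *		NDFREE(&nd, NDF_ONLY_PNBUF);
 *		vp = nd.ni_vp;
 *		...				(vp is returned locked)
 *		VOP_UNLOCK(vp, 0, td);
 *		...
 *		vn_close(vp, FREAD, td->td_ucred, td);
 *	}
 */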

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(ndp, flagp, cmode, cred, fdidx)
	struct nameidata *ndp;
	int *flagp, cmode;
	struct ucred *cred;
	int fdidx;
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, fmode, error;
#ifdef LOOKUP_SHARED
	int exclusive;		/* The current intended lock state */

	exclusive = 0;
#endif

restart:
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
#ifdef MAC
			error = mac_check_vnode_create(cred, ndp->ni_dvp,
			    &ndp->ni_cnd, vap);
			if (error == 0) {
#endif
				VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
				    &ndp->ni_cnd, vap);
#ifdef MAC
			}
#endif
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
#ifdef LOOKUP_SHARED
			exclusive = 1;
#endif
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
#ifdef LOOKUP_SHARED
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
		    LOCKSHARED | LOCKLEAF;
#else
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
#endif
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	mode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		mode |= VWRITE;
	}
	if (fmode & FREAD)
		mode |= VREAD;
	if (fmode & O_APPEND)
		mode |= VAPPEND;
#ifdef MAC
	error = mac_check_vnode_open(cred, vp, mode);
	if (error)
		goto bad;
#endif
	if ((fmode & O_CREAT) == 0) {
		if (mode & VWRITE) {
			error = vn_writechk(vp);
			if (error)
				goto bad;
		}
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, td);
			if (error)
				goto bad;
		}
	}
	if ((error = VOP_GETATTR(vp, vap, cred, td)) == 0) {
		vp->v_cachedfs = vap->va_fsid;
		vp->v_cachedid = vap->va_fileid;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, td, fdidx)) != 0)
		goto bad;
	/*
	 * Make sure that a VM object is created for VMIO support.
	 */
	if (vn_canvmio(vp) == TRUE) {
#ifdef LOOKUP_SHARED
		int flock;

		if (!exclusive && VOP_GETVOBJECT(vp, NULL) != 0)
			VOP_LOCK(vp, LK_UPGRADE, td);
		/*
		 * In cases where the object is marked as dead object_create
		 * will unlock and relock exclusive.  It is safe to call in
		 * here with a shared lock because we only examine fields that
		 * the shared lock guarantees will be stable.  In the UPGRADE
		 * case it is not likely that anyone has used this vnode yet
		 * so there will be no contention.  The logic after this call
		 * restores the requested locking state.
		 */
#endif
		if ((error = vfs_object_create(vp, td, cred)) != 0) {
			VOP_UNLOCK(vp, 0, td);
			VOP_CLOSE(vp, fmode, cred, td);
			NDFREE(ndp, NDF_ONLY_PNBUF);
			vrele(vp);
			*flagp = fmode;
			return (error);
		}
#ifdef LOOKUP_SHARED
		flock = VOP_ISLOCKED(vp, td);
		if (!exclusive && flock == LK_EXCLUSIVE)
			VOP_LOCK(vp, LK_DOWNGRADE, td);
#endif
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	*flagp = fmode;
	ASSERT_VOP_LOCKED(vp, "vn_open_cred");
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	ndp->ni_vp = NULL;
	return (error);
}
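
/*
 * Illustrative sketch only (hypothetical caller and path variable):
 * creating a file through the O_CREAT path with an explicit credential,
 * much as a core dump writer might.  "corepath" is assumed here.
 *
 *	struct nameidata nd;
 *	int flags, error;
 *
 *	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, corepath, td);
 *	flags = O_CREAT | FWRITE | O_NOFOLLOW;
 *	error = vn_open_cred(&nd, &flags, S_IRUSR | S_IWUSR, cred, -1);
 *	if (error == 0) {
 *		NDFREE(&nd, NDF_ONLY_PNBUF);
 *		...		(write, then VOP_UNLOCK() and vn_close())
 *	}
 */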

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_vflag & VV_TEXT)
		return (ETXTBSY);

	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, file_cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *file_cred;
	struct thread *td;
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, file_cred, td);
	/*
	 * XXX - In certain instances VOP_CLOSE has to do the vrele
	 * itself.  If the vrele has been done, it will return EAGAIN
	 * to indicate that the vrele should not be done again.  When
	 * this happens, we just return success.  The correct thing to
	 * do would be to have all VOP_CLOSE instances do the vrele.
	 */
	if (error == EAGAIN)
		return (0);
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic - detect sequential operation
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (fp->f_seqcount > IO_SEQMAX)
			fp->f_seqcount = IO_SEQMAX;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential; quickly draw down the seqcount.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}
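
/*
 * Worked example (assuming the default BKVASIZE of 16K): a consumer
 * reading 64K at a time sequentially advances f_seqcount by
 * (65536 + 16383) / 16384 = 4 on each read, clamped at IO_SEQMAX.  The
 * value returned, f_seqcount << IO_SEQSHIFT, rides in the upper bits of
 * ioflag, where clustering code can use it to scale read-ahead.  One
 * non-sequential access drops f_seqcount to 1; a second drops it to 0.
 */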

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
    aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	int *aresid;
	struct thread *td;
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				return (error);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		} else {
			/*
			 * XXX This should be LK_SHARED but I don't trust VFS
			 * enough to leave it like that until it has been
			 * reviewed further.
			 */
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		}
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_check_vnode_read(active_cred, file_cred,
			    vp);
		else
			error = mac_check_vnode_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred)
			cred = file_cred;
		else
			cred = active_cred;
		if (rw == UIO_READ)
			error = VOP_READ(vp, &auio, ioflg, cred);
		else
			error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_WRITE)
			vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}
	return (error);
}
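
/*
 * Illustrative sketch only: writing a kernel buffer to a vnode that the
 * caller has not locked.  The buffer and offset are hypothetical; the
 * ktrace and core dump writers are real examples of this pattern.
 *
 *	char data[512];
 *	int error;
 *
 *	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)data, sizeof(data),
 *	    (off_t)0, UIO_SYSSPACE, IO_UNIT, td->td_ucred, NOCRED,
 *	    NULL, td);
 *
 * Passing a NULL aresid asks vn_rdwr() to treat a partial transfer as
 * EIO; pass a pointer instead to handle short I/O yourself.
 */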

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * call bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
    file_cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	size_t len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	size_t *aresid;
	struct thread *td;
{
	int error = 0;
	int iaresid;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		iaresid = 0;
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, &iaresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid = len + iaresid;
	return (error);
}
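
/*
 * Worked example (with the usual MAXBSIZE of 64K): a write of 200000
 * bytes starting at offset 100000 is issued as chunks of 31072
 * (= 65536 - 100000 % 65536), then 65536, 65536, and finally 37856
 * bytes, so every chunk after the first starts on a MAXBSIZE boundary.
 */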

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	int error, ioflag;

	mtx_lock(&Giant);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	VOP_LEASE(vp, td, fp->f_cred, LEASE_READ);
	/*
	 * According to McKusick the vn lock is protecting f_offset here.
	 * Once this field has its own lock we can acquire this shared.
	 */
	if ((flags & FOF_OFFSET) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_NOPAUSE | LK_RETRY, td);
		uio->uio_offset = fp->f_offset;
	} else
		vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);

	ioflag |= sequential_heuristic(uio, fp);

#ifdef MAC
	error = mac_check_vnode_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	struct mount *mp;
	int error, ioflag;

	mtx_lock(&Giant);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = fp->f_vnode;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
		mtx_unlock(&Giant);
		return (error);
	}
	VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
#ifdef MAC
	error = mac_check_vnode_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	mtx_unlock(&Giant);
	return (error);
}
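
/*
 * Note on FOF_OFFSET, illustrated (hypothetical caller): both routines
 * above consult and update the file's f_offset only when FOF_OFFSET is
 * absent.  A pread(2)-style consumer supplies its own position and
 * leaves f_offset untouched:
 *
 *	auio.uio_offset = 1234;			(explicit position)
 *	error = fo_read(fp, &auio, td->td_ucred, FOF_OFFSET, td);
 *
 * whereas a plain read(2) passes flags of 0, so the transfer starts at
 * f_offset and f_offset is advanced afterward.
 */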

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, active_cred, td)
	struct file *fp;
	struct stat *sb;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	int error;

	mtx_lock(&Giant);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp, 0, td);
	mtx_unlock(&Giant);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(vp, sb, active_cred, file_cred, td)
	struct vnode *vp;
	register struct stat *sb;
	struct ucred *active_cred;
	struct ucred *file_cred;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

#ifdef MAC
	error = mac_check_vnode_stat(active_cred, file_cred, vp);
	if (error)
		return (error);
#endif

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, active_cred, td);
	if (error)
		return (error);

	vp->v_cachedfs = vap->va_fsid;
	vp->v_cachedid = vap->va_fileid;

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * This is a cosmetic change, symlinks do not have a mode.
		 * Note that we must adjust `mode' here, not sb->st_mode:
		 * st_mode is not assigned until after this switch, so a
		 * change made directly to it would be overwritten.
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;
	sb->st_birthtimespec = vap->va_birthtime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file".
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		sb->st_blksize = vp->v_rdev->si_bsize_best;
		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
			sb->st_blksize = vp->v_rdev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser(td))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	return (0);
}
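
/*
 * Worked example of the mode assembly above: a regular file with
 * va_mode 0644 yields st_mode = S_IFREG | 0644 = 0100644, which is what
 * S_ISREG() and the permission macros in <sys/stat.h> expect to see.
 */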

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, active_cred, td)
	struct file *fp;
	u_long com;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	struct vnode *vpold;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			error = VOP_GETATTR(vp, &vattr, active_cred, td);
			VOP_UNLOCK(vp, 0, td);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* FALLTHROUGH */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK)
				return (ENOTTY);
			*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
			return (0);
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, active_cred, td);
		if (error == ENOIOCTL) {
#ifdef DIAGNOSTIC
			kdb_enter("ENOIOCTL leaked through");
#endif
			error = ENOTTY;
		}
		if (error == 0 && com == TIOCSCTTY) {
			/* Do nothing if reassigning same control tty */
			sx_slock(&proctree_lock);
			if (td->td_proc->p_session->s_ttyvp == vp) {
				sx_sunlock(&proctree_lock);
				return (0);
			}

			vpold = td->td_proc->p_session->s_ttyvp;
			VREF(vp);
			SESS_LOCK(td->td_proc->p_session);
			td->td_proc->p_session->s_ttyvp = vp;
			SESS_UNLOCK(td->td_proc->p_session);

			sx_sunlock(&proctree_lock);

			/* Get rid of reference to old control tty */
			if (vpold)
				vrele(vpold);
		}
		return (error);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp;
#ifdef MAC
	int error;
#endif

	vp = fp->f_vnode;
#ifdef MAC
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = mac_check_vnode_poll(active_cred, fp->f_cred, vp);
	VOP_UNLOCK(vp, 0, td);
	if (error)
		return (error);
#endif

	return (VOP_POLL(vp, events, fp->f_cred, td));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire the requested lock.
 */
int
#ifndef DEBUG_LOCKS
vn_lock(vp, flags, td)
#else
debug_vn_lock(vp, flags, td, filename, line)
#endif
	struct vnode *vp;
	int flags;
	struct thread *td;
#ifdef DEBUG_LOCKS
	const char *filename;
	int line;
#endif
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			VI_LOCK(vp);
		if ((vp->v_iflag & VI_XLOCK) && vp->v_vxthread != curthread) {
			if ((flags & LK_NOWAIT) != 0) {
				VI_UNLOCK(vp);
				return (ENOENT);
			}
			vp->v_iflag |= VI_XWANT;
			msleep(vp, VI_MTX(vp), PINOD, "vn_lock", 0);
			if ((flags & LK_RETRY) == 0) {
				VI_UNLOCK(vp);
				return (ENOENT);
			}
		}
#ifdef DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
#endif
		/*
		 * lockmgr drops the interlock before it will return for
		 * any reason.  So force the code above to relock it.
		 */
		error = VOP_LOCK(vp, flags | LK_NOPAUSE | LK_INTERLOCK, td);
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY && error != 0);
	return (error);
}
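
/*
 * Illustrative sketch of the interlock convention above (hypothetical
 * caller): a caller that already holds the vnode interlock passes
 * LK_INTERLOCK so vn_lock() does not try to take it again; in all
 * cases vn_lock() returns with the interlock dropped.
 *
 *	VI_LOCK(vp);
 *	...			(examine v_iflag, v_usecount, etc.)
 *	if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY, td) == 0) {
 *		...
 *		VOP_UNLOCK(vp, 0, td);
 *	}
 */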

/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct vnode *vp;
	struct flock lf;
	int error;

	vp = fp->f_vnode;

	mtx_lock(&Giant);
	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
	}

	fp->f_ops = &badfileops;

	error = vn_close(vp, fp->f_flag, fp->f_cred, td);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Prepare to start a filesystem write operation.  If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed.  If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	GIANT_REQUIRED;

	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);
	/*
	 * Check on status of suspension.
	 */
	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		if (flags & V_NOWAIT)
			return (EWOULDBLOCK);
		error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
		    "suspfs", 0);
		if (error)
			return (error);
	}
	if (flags & V_XSLEEP)
		return (0);
	mp->mnt_writeopcount++;
	return (0);
}

/*
 * Secondary suspension.  Used by operations, such as vop_inactive
 * routines, that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_write_suspend_wait(vp, mp, flags)
	struct vnode *vp;
	struct mount *mp;
	int flags;
{
	int error;

	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
		return (0);
	if (flags & V_NOWAIT)
		return (EWOULDBLOCK);
	/*
	 * Wait for the suspension to finish.
	 */
	return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
	    "suspfs", 0));
}

/*
 * Filesystem write operation has completed.  If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{

	GIANT_REQUIRED;

	if (mp == NULL)
		return;
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
}
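
/*
 * Illustrative sketch of the write-suspension protocol (hypothetical
 * caller): every path that will dirty a filesystem brackets the work
 * with vn_start_write()/vn_finished_write(), exactly as vn_write()
 * above does, so that vfs_write_suspend() below can drain writers.
 *
 *	struct mount *mp;
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	...			(modify the file)
 *	VOP_UNLOCK(vp, 0, td);
 *	vn_finished_write(mp);
 */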

/*
 * Request a filesystem to suspend write operations.
 */
int
vfs_write_suspend(mp)
	struct mount *mp;
{
	struct thread *td = curthread;
	int error;

	if (mp->mnt_kern_flag & MNTK_SUSPEND)
		return (0);
	mp->mnt_kern_flag |= MNTK_SUSPEND;
	if (mp->mnt_writeopcount > 0)
		(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
	if ((error = VFS_SYNC(mp, MNT_WAIT, td->td_ucred, td)) != 0) {
		vfs_write_resume(mp);
		return (error);
	}
	mp->mnt_kern_flag |= MNTK_SUSPENDED;
	return (0);
}

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(mp)
	struct mount *mp;
{

	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
		return;
	mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
	wakeup(&mp->mnt_writeopcount);
	wakeup(&mp->mnt_flag);
}

/*
 * Implement kqueues for files by translating them into vnode operations.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(fp->f_vnode, kn));
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * These calls pass in a NULL credential, authorizing access as "kernel".
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, td);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}

/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute removal as kernel */
	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}
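
/*
 * Illustrative sketch of the wrappers above (the buffer size and
 * attribute name are examples only): reading a system-namespace
 * attribute into a stack buffer from a context that already holds the
 * vnode lock.
 *
 *	char buf[64];
 *	int buflen = sizeof(buf);
 *
 *	error = vn_extattr_get(vp, IO_NODELOCKED,
 *	    EXTATTR_NAMESPACE_SYSTEM, "posix1e.acl_access", &buflen,
 *	    buf, td);
 *
 * On success, buflen is reduced to the number of bytes actually read.
 */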