/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kdb.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/sx.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/unistd.h>

static fo_rdwr_t	vn_read;
static fo_rdwr_t	vn_write;
static fo_ioctl_t	vn_ioctl;
static fo_poll_t	vn_poll;
static fo_kqfilter_t	vn_kqfilter;
static fo_stat_t	vn_statfile;
static fo_close_t	vn_closefile;

struct fileops vnops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

int
vn_open(ndp, flagp, cmode, fdidx)
	struct nameidata *ndp;
	int *flagp, cmode, fdidx;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred, fdidx));
}
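
/*
 * Hedged usage sketch, not part of this file: a typical in-kernel open
 * through the wrapper above.  The helper name is hypothetical; the
 * pattern follows in-tree callers that pass fdidx == -1, for which
 * vn_open() itself drops Giant on success.
 */
static int
kernel_open_example(const char *path, struct vnode **vpp, struct thread *td)
{
	struct nameidata nd;
	int flags, error;

	flags = FREAD;
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, td);
	if ((error = vn_open(&nd, &flags, 0, -1)) != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	*vpp = nd.ni_vp;
	VOP_UNLOCK(nd.ni_vp, 0, td);	/* vn_open() returns it locked */
	return (0);
}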

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(ndp, flagp, cmode, cred, fdidx)
	struct nameidata *ndp;
	int *flagp, cmode;
	struct ucred *cred;
	int fdidx;
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, fmode, error;
	int vfslocked;
#ifdef LOOKUP_SHARED
	int exclusive;		/* The current intended lock state */

	exclusive = 0;
#endif

restart:
	vfslocked = 0;
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF | MPSAFE;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		vfslocked = (ndp->ni_cnd.cn_flags & GIANTHELD) != 0;
		ndp->ni_cnd.cn_flags &= ~MPSAFE;
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				VFS_UNLOCK_GIANT(vfslocked);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
#ifdef MAC
			error = mac_check_vnode_create(cred, ndp->ni_dvp,
			    &ndp->ni_cnd, vap);
			if (error == 0) {
#endif
				VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
				    &ndp->ni_cnd, vap);
#ifdef MAC
			}
#endif
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				VFS_UNLOCK_GIANT(vfslocked);
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
#ifdef LOOKUP_SHARED
			exclusive = 1;
#endif
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
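		/*
		 * Plain lookups may take the leaf vnode's lock shared when
		 * the kernel is built with LOOKUP_SHARED; the create path
		 * above always holds it exclusively.
		 */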
#ifdef LOOKUP_SHARED
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
		    LOCKSHARED | LOCKLEAF | MPSAFE;
#else
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
		    LOCKLEAF | MPSAFE;
#endif
		if ((error = namei(ndp)) != 0)
			return (error);
		ndp->ni_cnd.cn_flags &= ~MPSAFE;
		vfslocked = (ndp->ni_cnd.cn_flags & GIANTHELD) != 0;
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	mode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		mode |= VWRITE;
	}
	if (fmode & FREAD)
		mode |= VREAD;
	if (fmode & O_APPEND)
		mode |= VAPPEND;
#ifdef MAC
	error = mac_check_vnode_open(cred, vp, mode);
	if (error)
		goto bad;
#endif
	if ((fmode & O_CREAT) == 0) {
		if (mode & VWRITE) {
			error = vn_writechk(vp);
			if (error)
				goto bad;
		}
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, td);
			if (error)
				goto bad;
		}
	}
	if ((error = VOP_OPEN(vp, fmode, cred, td, fdidx)) != 0)
		goto bad;

	if (fmode & FWRITE)
		vp->v_writecount++;
	*flagp = fmode;
	ASSERT_VOP_LOCKED(vp, "vn_open_cred");
	if (fdidx == -1)
		VFS_UNLOCK_GIANT(vfslocked);
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	VFS_UNLOCK_GIANT(vfslocked);
	*flagp = fmode;
	ndp->ni_vp = NULL;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_vflag & VV_TEXT)
		return (ETXTBSY);

	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, file_cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *file_cred;
	struct thread *td;
{
	int error;

	VFS_ASSERT_GIANT(vp->v_mount);

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, file_cred, td);
	/*
	 * XXX - In certain instances VOP_CLOSE has to do the vrele
	 * itself.  If the vrele has been done, it will return EAGAIN
	 * to indicate that the vrele should not be done again.  When
	 * this happens, we just return success.  The correct thing to
	 * do would be to have all VOP_CLOSE instances do the vrele.
	 */
	if (error == EAGAIN)
		return (0);
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic - detect sequential operation
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (fp->f_seqcount > IO_SEQMAX)
			fp->f_seqcount = IO_SEQMAX;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}
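
/*
 * Hedged sketch, not part of this file: how a filesystem's read path can
 * recover the hint that sequential_heuristic() packs into the high bits
 * of ioflag.  The helper name is hypothetical; ffs_read() does something
 * similar to size its read-ahead.
 */
static __inline int
seq_hint_example(int ioflag)
{

	/* f_seqcount was shifted up by IO_SEQSHIFT; shift it back down. */
	return (ioflag >> IO_SEQSHIFT);
}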

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
    aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	int *aresid;
	struct thread *td;
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	int error;

	VFS_ASSERT_GIANT(vp->v_mount);

	if ((ioflg & IO_NODELOCKED) == 0) {
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				return (error);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		} else {
			/*
			 * XXX This should be LK_SHARED but I don't trust VFS
			 * enough to leave it like that until it has been
			 * reviewed further.
			 */
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		}

	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_check_vnode_read(active_cred, file_cred,
			    vp);
		else
			error = mac_check_vnode_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred)
			cred = file_cred;
		else
			cred = active_cred;
		if (rw == UIO_READ)
			error = VOP_READ(vp, &auio, ioflg, cred);
		else
			error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_WRITE)
			vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}
	return (error);
}
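
/*
 * Hedged sketch, not part of this file: a typical in-kernel caller of
 * vn_rdwr().  The helper name is hypothetical; it reads the first 'len'
 * bytes of an unlocked vnode into a kernel buffer and treats a short
 * read as an error, mirroring vn_rdwr()'s own aresid convention.
 */
static int
read_header_example(struct vnode *vp, void *buf, int len, struct thread *td)
{
	int resid, error;

	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, len, (off_t)0,
	    UIO_SYSSPACE, 0, td->td_ucred, NOCRED, &resid, td);
	if (error == 0 && resid != 0)
		error = EIO;	/* short read */
	return (error);
}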

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
    file_cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	size_t len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	size_t *aresid;
	struct thread *td;
{
	int error = 0;
	int iaresid;

	VFS_ASSERT_GIANT(vp->v_mount);

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		iaresid = 0;
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, &iaresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid = len + iaresid;
	return (error);
}

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	int error, ioflag;
	int vfslocked;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	VOP_LEASE(vp, td, fp->f_cred, LEASE_READ);
	/*
	 * According to McKusick the vn lock is protecting f_offset here.
	 * Once this field has its own lock we can acquire this shared.
	 */
	if ((flags & FOF_OFFSET) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_NOPAUSE | LK_RETRY, td);
		uio->uio_offset = fp->f_offset;
	} else
		vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);

	ioflag |= sequential_heuristic(uio, fp);

#ifdef MAC
	error = mac_check_vnode_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	struct mount *mp;
	int error, ioflag;
	int vfslocked;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = fp->f_vnode;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		goto unlock;
	VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
#ifdef MAC
	error = mac_check_vnode_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
unlock:
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}
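
/*
 * Hedged sketch, not part of this file: how a pread(2)-style caller uses
 * FOF_OFFSET so that vn_read() above leaves fp->f_offset untouched and
 * takes only the shared vnode lock.  The helper name is hypothetical;
 * the uio is assumed to be set up by the caller.
 */
static int
positional_read_example(struct file *fp, struct uio *auio, off_t offset,
    struct thread *td)
{

	auio->uio_offset = offset;	/* explicit offset, not fp->f_offset */
	return (fo_read(fp, auio, td->td_ucred, FOF_OFFSET, td));
}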

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, active_cred, td)
	struct file *fp;
	struct stat *sb;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	int vfslocked;
	int error;

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp, 0, td);
	VFS_UNLOCK_GIANT(vfslocked);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(vp, sb, active_cred, file_cred, td)
	struct vnode *vp;
	register struct stat *sb;
	struct ucred *active_cred;
	struct ucred *file_cred;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

#ifdef MAC
	error = mac_check_vnode_stat(active_cred, file_cred, vp);
	if (error)
		return (error);
#endif

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, active_cred, td);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change; symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;
	sb->st_birthtimespec = vap->va_birthtime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Default to PAGE_SIZE after much discussion.
	 * XXX: min(PAGE_SIZE, vp->v_bufobj.bo_bsize) may be more correct.
	 */

	sb->st_blksize = PAGE_SIZE;

	sb->st_flags = vap->va_flags;
	if (suser(td))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	return (0);
}
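
/*
 * Worked example for the st_blocks computation above, assuming the usual
 * S_BLKSIZE of 512: a file that occupies va_bytes == 10240 bytes on disk
 * reports st_blocks == 20 (10240 >> 9), independent of st_size.
 */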

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, active_cred, td)
	struct file *fp;
	u_long com;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	struct vattr vattr;
	int vfslocked;
	int error;

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	error = ENOTTY;
	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			error = VOP_GETATTR(vp, &vattr, active_cred, td);
			VOP_UNLOCK(vp, 0, td);
			if (!error)
				*(int *)data = vattr.va_size - fp->f_offset;
		} else if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			error = 0;
		else
			error = VOP_IOCTL(vp, com, data, fp->f_flag,
			    active_cred, td);
		break;

	default:
		break;
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp;
	int error;

	mtx_lock(&Giant);

	vp = fp->f_vnode;
#ifdef MAC
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = mac_check_vnode_poll(active_cred, fp->f_cred, vp);
	VOP_UNLOCK(vp, 0, td);
	if (!error)
#endif
		error = VOP_POLL(vp, events, fp->f_cred, td);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef DEBUG_LOCKS
vn_lock(vp, flags, td)
#else
debug_vn_lock(vp, flags, td, filename, line)
#endif
	struct vnode *vp;
	int flags;
	struct thread *td;
#ifdef DEBUG_LOCKS
	const char *filename;
	int line;
#endif
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			VI_LOCK(vp);
		if ((vp->v_iflag & VI_XLOCK) && vp->v_vxthread != curthread) {
			if ((flags & LK_NOWAIT) != 0) {
				VI_UNLOCK(vp);
				return (ENOENT);
			}
			vp->v_iflag |= VI_XWANT;
			msleep(vp, VI_MTX(vp), PINOD, "vn_lock", 0);
			if ((flags & LK_RETRY) == 0) {
				VI_UNLOCK(vp);
				return (ENOENT);
			}
		}
#ifdef DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
#endif
		/*
		 * lockmgr drops interlock before it will return for
		 * any reason.  So force the code above to relock it.
		 */
		error = VOP_LOCK(vp, flags | LK_NOPAUSE | LK_INTERLOCK, td);
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY && error != 0);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct vnode *vp;
	struct flock lf;
	int vfslocked;
	int error;

	vp = fp->f_vnode;

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
	}

	fp->f_ops = &badfileops;

	error = vn_close(vp, fp->f_flag, fp->f_cred, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}
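
/*
 * Hedged sketch, not part of this file: the canonical bracketing that the
 * suspension primitives below expect from a write path, mirroring what
 * vn_rdwr() does for a regular file.  The helper name is hypothetical.
 */
static int
write_bracketed_example(struct vnode *vp, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		return (error);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	/* ... modify the vnode here ... */
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	return (0);
}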

/*
 * Prepare to start a filesystem write operation.  If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed.  If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	error = 0;
	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);
	MNT_ILOCK(mp);
	/*
	 * Check on status of suspension.
	 */
	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		if (flags & V_NOWAIT) {
			error = EWOULDBLOCK;
			goto unlock;
		}
		error = msleep(&mp->mnt_flag, MNT_MTX(mp),
		    (PUSER - 1) | (flags & PCATCH), "suspfs", 0);
		if (error)
			goto unlock;
	}
	if (flags & V_XSLEEP)
		goto unlock;
	mp->mnt_writeopcount++;
unlock:
	MNT_IUNLOCK(mp);
	return (error);
}

/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_write_suspend_wait(vp, mp, flags)
	struct vnode *vp;
	struct mount *mp;
	int flags;
{
	int error;

	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if (mp == NULL)
		return (0);
	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_SUSPENDED) == 0) {
		MNT_IUNLOCK(mp);
		return (0);
	}
	if (flags & V_NOWAIT) {
		MNT_IUNLOCK(mp);
		return (EWOULDBLOCK);
	}
	/*
	 * Wait for the suspension to finish.
	 */
	return (msleep(&mp->mnt_flag, MNT_MTX(mp),
	    (PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0));
}

/*
 * Filesystem write operation has completed.  If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
	MNT_IUNLOCK(mp);
}

/*
 * Request a filesystem to suspend write operations.
 */
int
vfs_write_suspend(mp)
	struct mount *mp;
{
	struct thread *td = curthread;
	int error;

	error = 0;
	MNT_ILOCK(mp);
	if (mp->mnt_kern_flag & MNTK_SUSPEND)
		goto unlock;
	mp->mnt_kern_flag |= MNTK_SUSPEND;
	if (mp->mnt_writeopcount > 0)
		(void) msleep(&mp->mnt_writeopcount,
		    MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
	else
		MNT_IUNLOCK(mp);
	if ((error = VFS_SYNC(mp, MNT_WAIT, td)) != 0) {
		vfs_write_resume(mp);
		return (error);
	}
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_SUSPENDED;
unlock:
	MNT_IUNLOCK(mp);
	return (error);
}
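
/*
 * Hedged sketch, not part of this file: how a snapshot-style consumer
 * pairs vfs_write_suspend() with vfs_write_resume() below.  The helper
 * name is hypothetical; ffs_snapshot() is the in-tree user of this
 * interface.
 */
static int
with_fs_suspended_example(struct mount *mp)
{
	int error;

	if ((error = vfs_write_suspend(mp)) != 0)
		return (error);
	/* ... the filesystem is quiesced and synced here ... */
	vfs_write_resume(mp);
	return (0);
}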

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(mp)
	struct mount *mp;
{

	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
		wakeup(&mp->mnt_writeopcount);
		wakeup(&mp->mnt_flag);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Implement kqueues for files by translating them into vnode operations.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	mtx_lock(&Giant);
	error = VOP_KQFILTER(fp->f_vnode, kn);
	mtx_unlock(&Giant);

	return (error);
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, td);

	if (error == 0)
		*buflen = *buflen - auio.uio_resid;

	return (error);
}

/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute removal as kernel */
	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}
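
/*
 * Hedged sketch, not part of this file: round-tripping a small kernel-owned
 * extended attribute with the wrappers above.  The helper and attribute
 * names are hypothetical; EXTATTR_NAMESPACE_SYSTEM comes from
 * <sys/extattr.h>, and the caller is assumed to already hold vp's lock
 * exclusively (hence IO_NODELOCKED).
 */
static int
extattr_roundtrip_example(struct vnode *vp, struct thread *td)
{
	char buf[16];
	int buflen, error;

	bzero(buf, sizeof(buf));
	error = vn_extattr_set(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
	    "example", sizeof(buf), buf, td);
	if (error)
		return (error);
	buflen = sizeof(buf);
	return (vn_extattr_get(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
	    "example", &buflen, buf, td));
}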