/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD$
 */

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/sx.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

#include <machine/limits.h>

static int vn_closefile(struct file *fp, struct thread *td);
static int vn_ioctl(struct file *fp, u_long com, void *data,
	    struct ucred *active_cred, struct thread *td);
static int vn_read(struct file *fp, struct uio *uio,
	    struct ucred *active_cred, int flags, struct thread *td);
static int vn_poll(struct file *fp, int events, struct ucred *active_cred,
	    struct thread *td);
static int vn_kqfilter(struct file *fp, struct knote *kn);
static int vn_statfile(struct file *fp, struct stat *sb,
	    struct ucred *active_cred, struct thread *td);
static int vn_write(struct file *fp, struct uio *uio,
	    struct ucred *active_cred, int flags, struct thread *td);

struct fileops vnops = {
	vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
	vn_statfile, vn_closefile
};

int
vn_open(ndp, flagp, cmode)
	register struct nameidata *ndp;
	int *flagp, cmode;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred));
}

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
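 * On success the opened vnode is returned locked and referenced in
 * ndp->ni_vp.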
 */
int
vn_open_cred(ndp, flagp, cmode, cred)
	register struct nameidata *ndp;
	int *flagp, cmode;
	struct ucred *cred;
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, fmode, error;
#ifdef LOOKUP_SHARED
	int exclusive;		/* The current intended lock state */

	exclusive = 0;
#endif

restart:
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
			VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
			    &ndp->ni_cnd, vap);
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
#ifdef LOOKUP_SHARED
			exclusive = 1;
#endif
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
#ifdef LOOKUP_SHARED
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
		    LOCKSHARED | LOCKLEAF;
#else
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
#endif
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	mode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		mode |= VWRITE;
	}
	if (fmode & FREAD)
		mode |= VREAD;
	if (fmode & O_APPEND)
		mode |= VAPPEND;
#ifdef MAC
	error = mac_check_vnode_open(cred, vp, mode);
	if (error)
		goto bad;
#endif
	if ((fmode & O_CREAT) == 0) {
		if (mode & VWRITE) {
			error = vn_writechk(vp);
			if (error)
				goto bad;
		}
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, td);
			if (error)
				goto bad;
		}
	}
	if ((error = VOP_GETATTR(vp, vap, cred, td)) == 0) {
		vp->v_cachedfs = vap->va_fsid;
		vp->v_cachedid = vap->va_fileid;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, td)) != 0)
		goto bad;
	/*
	 * Make sure that a VM object is created for VMIO support.
	 */
	if (vn_canvmio(vp) == TRUE) {
#ifdef LOOKUP_SHARED
		int flock;

		if (!exclusive && VOP_GETVOBJECT(vp, NULL) != 0)
			VOP_LOCK(vp, LK_UPGRADE, td);
		/*
		 * In cases where the object is marked as dead object_create
		 * will unlock and relock exclusive.  It is safe to call in
		 * here with a shared lock because we only examine fields that
		 * the shared lock guarantees will be stable.  In the UPGRADE
		 * case it is not likely that anyone has used this vnode yet
		 * so there will be no contention.  The logic after this call
		 * restores the requested locking state.
		 */
#endif
		if ((error = vfs_object_create(vp, td, cred)) != 0) {
			VOP_UNLOCK(vp, 0, td);
			VOP_CLOSE(vp, fmode, cred, td);
			NDFREE(ndp, NDF_ONLY_PNBUF);
			vrele(vp);
			*flagp = fmode;
			return (error);
		}
#ifdef LOOKUP_SHARED
		flock = VOP_ISLOCKED(vp, td);
		if (!exclusive && flock == LK_EXCLUSIVE)
			VOP_LOCK(vp, LK_DOWNGRADE, td);
#endif
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	*flagp = fmode;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_vflag & VV_TEXT)
		return (ETXTBSY);

	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, file_cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *file_cred;
	struct thread *td;
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, file_cred, td);
	/*
	 * XXX - In certain instances VOP_CLOSE has to do the vrele
	 * itself.  If the vrele has been done, it will return EAGAIN
	 * to indicate that the vrele should not be done again.  When
	 * this happens, we just return success.  The correct thing to
	 * do would be to have all VOP_CLOSE instances do the vrele.
	 */
	if (error == EAGAIN)
		return (0);
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic - detect sequential operation
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (fp->f_seqcount >= 127)
			fp->f_seqcount = 127;
		return (fp->f_seqcount << 16);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
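 * Unless IO_NODELOCKED is given in ioflg, the vnode is locked here (shared
 * for reads, exclusive for writes) and unlocked again before returning.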
 */
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
    aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	int *aresid;
	struct thread *td;
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				return (error);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		} else {
			vn_lock(vp, LK_SHARED | LK_RETRY, td);
		}
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_check_vnode_read(active_cred, file_cred,
			    vp);
		else
			error = mac_check_vnode_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred)
			cred = file_cred;
		else
			cred = active_cred;
		if (rw == UIO_READ)
			error = VOP_READ(vp, &auio, ioflg, cred);
		else
			error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_WRITE)
			vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
    file_cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	int *aresid;
	struct thread *td;
{
	int error = 0;

	do {
		int chunk = (len > MAXBSIZE) ? MAXBSIZE : len;

		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, aresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

/*
 * File table vnode read routine.
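 * The vnode is locked shared around VOP_READ; unless FOF_OFFSET is set,
 * the I/O starts at fp->f_offset and the offset is updated afterwards.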
 */
static int
vn_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	int error, ioflag;

	mtx_lock(&Giant);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	VOP_LEASE(vp, td, fp->f_cred, LEASE_READ);
	vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag |= sequential_heuristic(uio, fp);

#ifdef MAC
	error = mac_check_vnode_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	struct mount *mp;
	int error, ioflag;

	mtx_lock(&Giant);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
		mtx_unlock(&Giant);
		return (error);
	}
	VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
#ifdef MAC
	error = mac_check_vnode_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * File table vnode stat routine.
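 * Locks the vnode exclusively and hands the request off to vn_stat().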
 */
static int
vn_statfile(fp, sb, active_cred, td)
	struct file *fp;
	struct stat *sb;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp, 0, td);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(vp, sb, active_cred, file_cred, td)
	struct vnode *vp;
	register struct stat *sb;
	struct ucred *active_cred;
	struct ucred *file_cred;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

#ifdef MAC
	error = mac_check_vnode_stat(active_cred, file_cred, vp);
	if (error)
		return (error);
#endif

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, active_cred, td);
	if (error)
		return (error);

	vp->v_cachedfs = vap->va_fsid;
	vp->v_cachedid = vap->va_fileid;

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;
	sb->st_birthtimespec = vap->va_birthtime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		sb->st_blksize = vp->v_rdev->si_bsize_best;
		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
			sb->st_blksize = vp->v_rdev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser(td))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	return (0);
}

/*
 * File table vnode ioctl routine.
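 * FIONREAD, FIONBIO, FIOASYNC and FIODTYPE get special handling here;
 * other requests are passed through to VOP_IOCTL().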
 */
static int
vn_ioctl(fp, com, data, active_cred, td)
	struct file *fp;
	u_long com;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	register struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *vpold;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			error = VOP_GETATTR(vp, &vattr, active_cred, td);
			VOP_UNLOCK(vp, 0, td);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */

	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK)
				return (ENOTTY);
			*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
			return (0);
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, active_cred, td);
		if (error == 0 && com == TIOCSCTTY) {

			/* Do nothing if reassigning same control tty */
			sx_slock(&proctree_lock);
			if (td->td_proc->p_session->s_ttyvp == vp) {
				sx_sunlock(&proctree_lock);
				return (0);
			}

			vpold = td->td_proc->p_session->s_ttyvp;
			VREF(vp);
			SESS_LOCK(td->td_proc->p_session);
			td->td_proc->p_session->s_ttyvp = vp;
			SESS_UNLOCK(td->td_proc->p_session);

			sx_sunlock(&proctree_lock);

			/* Get rid of reference to old control tty */
			if (vpold)
				vrele(vpold);
		}
		return (error);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp;
#ifdef MAC
	int error;
#endif

	vp = (struct vnode *)fp->f_data;
#ifdef MAC
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = mac_check_vnode_poll(active_cred, fp->f_cred, vp);
	VOP_UNLOCK(vp, 0, td);
	if (error)
		return (error);
#endif

	return (VOP_POLL(vp, events, fp->f_cred, td));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef DEBUG_LOCKS
vn_lock(vp, flags, td)
#else
debug_vn_lock(vp, flags, td, filename, line)
#endif
	struct vnode *vp;
	int flags;
	struct thread *td;
#ifdef DEBUG_LOCKS
	const char *filename;
	int line;
#endif
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			VI_LOCK(vp);
		if ((vp->v_iflag & VI_XLOCK) && vp->v_vxproc != curthread) {
			vp->v_iflag |= VI_XWANT;
			msleep(vp, VI_MTX(vp), PINOD | PDROP,
			    "vn_lock", 0);
			mp_fixme("interlock not released.");
			error = ENOENT;
		} else {
#if 0
			/* this can now occur in normal operation */
			if (vp->v_vxproc != NULL)
				log(LOG_INFO, "VXLOCK interlock avoided in vn_lock\n");
#endif
#ifdef DEBUG_LOCKS
			vp->filename = filename;
			vp->line = line;
#endif
			error = VOP_LOCK(vp,
			    flags | LK_NOPAUSE | LK_INTERLOCK, td);
			if (error == 0)
				return (error);
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}

/*
 * File table vnode close routine.
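 * Switches the file over to badfileops and releases the vnode reference
 * via vn_close().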
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{

	fp->f_ops = &badfileops;
	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
	    fp->f_cred, td));
}

/*
 * Prepare to start a filesystem write operation.  If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed.  If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);
	/*
	 * Check on status of suspension.
	 */
	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		if (flags & V_NOWAIT)
			return (EWOULDBLOCK);
		error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
		    "suspfs", 0);
		if (error)
			return (error);
	}
	if (flags & V_XSLEEP)
		return (0);
	mp->mnt_writeopcount++;
	return (0);
}

/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_write_suspend_wait(vp, mp, flags)
	struct vnode *vp;
	struct mount *mp;
	int flags;
{
	int error;

	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
		return (0);
	if (flags & V_NOWAIT)
		return (EWOULDBLOCK);
	/*
	 * Wait for the suspension to finish.
	 */
	return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
	    "suspfs", 0));
}

/*
 * Filesystem write operation has completed.  If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{

	if (mp == NULL)
		return;
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
}

/*
 * Request a filesystem to suspend write operations.
 */
void
vfs_write_suspend(mp)
	struct mount *mp;
{
	struct thread *td = curthread;

	if (mp->mnt_kern_flag & MNTK_SUSPEND)
		return;
	mp->mnt_kern_flag |= MNTK_SUSPEND;
	if (mp->mnt_writeopcount > 0)
		(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
	VFS_SYNC(mp, MNT_WAIT, td->td_ucred, td);
	mp->mnt_kern_flag |= MNTK_SUSPENDED;
}

/*
 * Request a filesystem to resume write operations.
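 * Clears the suspension flags and wakes up any writers blocked in
 * vn_start_write() or vn_write_suspend_wait().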
 */
void
vfs_write_resume(mp)
	struct mount *mp;
{

	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
		return;
	mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
	wakeup(&mp->mnt_writeopcount);
	wakeup(&mp->mnt_flag);
}

/*
 * Implement kqueues for files by translating them into vnode operations.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(((struct vnode *)fp->f_data), kn));
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, td);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}

/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}

/*
 * Remove a named extended attribute, authorizing as "kernel" access.
 */
int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute removal as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}