/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <security/audit/audit.h>
#include <sys/priv.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
		    char *dirbuf, int dirbuflen, off_t *off,
		    char **cpos, int *len, int *eofflag,
		    struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
		    struct thread *td);

#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

static int vop_stdis_text(struct vop_is_text_args *ap);
static int vop_stdunset_text(struct vop_unset_text_args *ap);
static int vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int vop_stdcopy_file_range(struct vop_copy_file_range_args *ap);
static int vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int vop_stdgetpages_async(struct vop_getpages_async_args *ap);
static int vop_stdstat(struct vop_stat_args *ap);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in an immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess(), and so on.
 */
struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_stat =		vop_stdstat,
	.vop_fdatasync =	vop_stdfdatasync,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_need_inactive =	vop_stdneed_inactive,
	.vop_ioctl =		vop_stdioctl,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_add_writecount =	vop_stdadd_writecount,
	.vop_copy_file_range =	vop_stdcopy_file_range,
};
VFS_VOP_VECTOR_REGISTER(default_vnodeops);
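
/*
 * Illustrative sketch (not part of the original file): a filesystem
 * normally fills in only the operations it implements and chains
 * everything else to the defaults above via .vop_default.  The "myfs"
 * names below are hypothetical:
 *
 *	static struct vop_vector myfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	myfs_lookup,
 *		.vop_read =	myfs_read,
 *	};
 *	VFS_VOP_VECTOR_REGISTER(myfs_vnodeops);
 *
 * An operation the filesystem leaves out then resolves to a vop_std*()
 * or vop_no*() entry here, or to VOP_EOPNOTSUPP through .vop_bypass
 * when this table has no entry either.
 */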

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * If documentation exists for an operation, it lives in the
 * VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(struct vop_lookup_args *ap)
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement a rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 * vop_nostrategy:
 *
 * Strategy routine for VFS devices that have none.
 *
 * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 * routine.  Typically this is done for a BIO_READ strategy call.
 * Typically B_INVAL is assumed to already be clear prior to a write
 * and should not be cleared manually unless you just made the buffer
 * invalid.  BIO_ERROR should be cleared either way.
 */
static int
vop_nostrategy(struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
    int dirbuflen, off_t *off, char **cpos, int *len,
    int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* Check for a malformed directory. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}
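
/*
 * Illustrative sketch (not part of the original file): the SEEK_END
 * handling above exists because a userland byte-range lock may be
 * specified relative to the end of the file, e.g.:
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK,
 *		.l_whence = SEEK_END,
 *		.l_start = 0,
 *		.l_len = 0,
 *	};
 *	fcntl(fd, F_SETLK, &fl);
 *
 * Converting such a request to absolute offsets requires the current
 * file size, which is why vop_stdadvlock() fetches it with VOP_GETATTR()
 * before handing the request to lf_advlock().
 */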

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(struct vop_pathconf_args *ap)
{

	switch (ap->a_name) {
	case _PC_ASYNC_IO:
		*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_ACL_EXTENDED:
	case _PC_ACL_NFS4:
	case _PC_CAP_PRESENT:
	case _PC_INF_PRESENT:
	case _PC_MAC_PRESENT:
		*ap->a_retval = 0;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
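
/*
 * Illustrative sketch (not part of the original file): a filesystem's own
 * pathconf method typically handles the names it knows better and defers
 * the rest here.  The "myfs" names are hypothetical:
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *		switch (ap->a_name) {
 *		case _PC_NAME_MAX:
 *			*ap->a_retval = 255;
 *			return (0);
 *		default:
 *			return (vop_stdpathconf(ap));
 *		}
 *	}
 */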

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr_unlock(vp->v_vnlock));
}

/* See above. */
int
vop_stdislocked(struct vop_islocked_args *ap)
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Variants of the above set.
 *
 * Differences are:
 * - shared locking disablement is not supported
 * - v_vnlock pointer is not honored
 */
int
vop_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct mtx *ilk;

	MPASS(vp->v_vnlock == &vp->v_lock);

	if (__predict_false((flags & ~(LK_TYPE_MASK | LK_NODDLKTREAT | LK_RETRY)) != 0))
		goto other;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		return (lockmgr_slock(&vp->v_lock, flags, ap->a_file, ap->a_line));
	case LK_EXCLUSIVE:
		return (lockmgr_xlock(&vp->v_lock, flags, ap->a_file, ap->a_line));
	}
other:
	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(&vp->v_lock, flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

int
vop_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockmgr_unlock(&vp->v_lock));
}

int
vop_islocked(struct vop_islocked_args *ap)
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockstatus(&vp->v_lock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(struct vop_poll_args *ap)
{

	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(struct vop_poll_args *ap)
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(struct vop_getwritemount_args *ap)
{
	struct mount *mp;
	struct vnode *vp;

	/*
	 * Note that having a reference does not prevent forced unmount from
	 * setting ->v_mount to NULL after the lock gets released.  This is of
	 * no consequence for typical consumers (most notably vn_start_write)
	 * since in this case the vnode is VIRF_DOOMED.  Unmount might have
	 * progressed far enough that its completion is only delayed by the
	 * reference obtained here.  The consumer only needs to concern itself
	 * with releasing it.
	 */
	vp = ap->a_vp;
	mp = vp->v_mount;
	if (mp == NULL) {
		*(ap->a_mpp) = NULL;
		return (0);
	}
	if (vfs_op_thread_enter(mp)) {
		if (mp == vp->v_mount) {
			vfs_mp_count_add_pcpu(mp, ref, 1);
			vfs_op_thread_exit(mp);
		} else {
			vfs_op_thread_exit(mp);
			mp = NULL;
		}
	} else {
		MNT_ILOCK(mp);
		if (mp == vp->v_mount) {
			MNT_REF(mp);
			MNT_IUNLOCK(mp);
		} else {
			MNT_IUNLOCK(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * If the file system doesn't implement VOP_BMAP, then return sensible defaults:
 * - Return the vnode's bufobj instead of any underlying device's bufobj
 * - Calculate the physical block number as if there were equal size
 *   consecutive blocks, but
 * - Report no contiguous runs of blocks.
 */
int
vop_stdbmap(struct vop_bmap_args *ap)
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
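
/*
 * Worked example for the calculation above (assumed numbers, not from the
 * original file): with an f_iosize of 32768 and DEV_BSIZE of 512,
 * btodb(32768) == 64, so logical block a_bn == 10 yields *a_bnp == 640.
 * This is consistent with equal-size consecutive blocks starting at
 * block 0, which is all a caller may assume from this default.
 */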

int
vop_stdfsync(struct vop_fsync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, ap->a_waitfor));
}

static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, MNT_WAIT));
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(struct vop_getpages_args *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL));
}

static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	if (ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return (vfs_kqfilter(ap));
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(struct vop_putpages_args *ap)
{

	return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals));
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	size_t *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs *sfs;
	off_t maxfilesize = 0;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > MAXPHYS)
		iosize = MAXPHYS;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
	error = VFS_STATFS(vp->v_mount, sfs, td);
	if (error == 0)
		maxfilesize = sfs->f_maxfilesize;
	free(sfs, M_STATFS);
	if (error != 0)
		goto out;
	if (maxfilesize) {
		if (offset > maxfilesize || len > maxfilesize ||
		    offset + len > maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}

int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	daddr_t startn, endn;
	off_t bstart, bend, start, end;
	int bsize, error;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		error = 0;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (VN_IS_DOOMED(vp)) {
			VOP_UNLOCK(vp);
			break;
		}

		/*
		 * Round to block boundaries (and later possibly further to
		 * page boundaries).  Applications cannot reasonably be aware
		 * of the boundaries, and the rounding must be to expand at
		 * both extremities to cover enough.  It still doesn't cover
		 * read-ahead.  For partial blocks, this gives unnecessary
		 * discarding of buffers but is efficient enough since the
		 * pages usually remain in VMIO for some time.
		 */
		bsize = vp->v_bufobj.bo_bsize;
		bstart = rounddown(ap->a_start, bsize);
		bend = roundup(ap->a_end, bsize);

		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.  Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
		 */
1112 */ 1113 if (vp->v_object != NULL) { 1114 start = trunc_page(bstart); 1115 end = round_page(bend); 1116 VM_OBJECT_RLOCK(vp->v_object); 1117 vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start), 1118 OFF_TO_IDX(end)); 1119 VM_OBJECT_RUNLOCK(vp->v_object); 1120 } 1121 1122 bo = &vp->v_bufobj; 1123 BO_RLOCK(bo); 1124 startn = bstart / bsize; 1125 endn = bend / bsize; 1126 error = bnoreuselist(&bo->bo_clean, bo, startn, endn); 1127 if (error == 0) 1128 error = bnoreuselist(&bo->bo_dirty, bo, startn, endn); 1129 BO_RUNLOCK(bo); 1130 VOP_UNLOCK(vp); 1131 break; 1132 default: 1133 error = EINVAL; 1134 break; 1135 } 1136 return (error); 1137 } 1138 1139 int 1140 vop_stdunp_bind(struct vop_unp_bind_args *ap) 1141 { 1142 1143 ap->a_vp->v_unpcb = ap->a_unpcb; 1144 return (0); 1145 } 1146 1147 int 1148 vop_stdunp_connect(struct vop_unp_connect_args *ap) 1149 { 1150 1151 *ap->a_unpcb = ap->a_vp->v_unpcb; 1152 return (0); 1153 } 1154 1155 int 1156 vop_stdunp_detach(struct vop_unp_detach_args *ap) 1157 { 1158 1159 ap->a_vp->v_unpcb = NULL; 1160 return (0); 1161 } 1162 1163 static int 1164 vop_stdis_text(struct vop_is_text_args *ap) 1165 { 1166 1167 return (ap->a_vp->v_writecount < 0); 1168 } 1169 1170 int 1171 vop_stdset_text(struct vop_set_text_args *ap) 1172 { 1173 struct vnode *vp; 1174 struct mount *mp; 1175 int error; 1176 1177 vp = ap->a_vp; 1178 VI_LOCK(vp); 1179 if (vp->v_writecount > 0) { 1180 error = ETXTBSY; 1181 } else { 1182 /* 1183 * If requested by fs, keep a use reference to the 1184 * vnode until the last text reference is released. 1185 */ 1186 mp = vp->v_mount; 1187 if (mp != NULL && (mp->mnt_kern_flag & MNTK_TEXT_REFS) != 0 && 1188 vp->v_writecount == 0) { 1189 VNPASS((vp->v_iflag & VI_TEXT_REF) == 0, vp); 1190 vp->v_iflag |= VI_TEXT_REF; 1191 vrefl(vp); 1192 } 1193 1194 vp->v_writecount--; 1195 error = 0; 1196 } 1197 VI_UNLOCK(vp); 1198 return (error); 1199 } 1200 1201 static int 1202 vop_stdunset_text(struct vop_unset_text_args *ap) 1203 { 1204 struct vnode *vp; 1205 int error; 1206 bool last; 1207 1208 vp = ap->a_vp; 1209 last = false; 1210 VI_LOCK(vp); 1211 if (vp->v_writecount < 0) { 1212 if ((vp->v_iflag & VI_TEXT_REF) != 0 && 1213 vp->v_writecount == -1) { 1214 last = true; 1215 vp->v_iflag &= ~VI_TEXT_REF; 1216 } 1217 vp->v_writecount++; 1218 error = 0; 1219 } else { 1220 error = EINVAL; 1221 } 1222 VI_UNLOCK(vp); 1223 if (last) 1224 vunref(vp); 1225 return (error); 1226 } 1227 1228 static int 1229 vop_stdadd_writecount(struct vop_add_writecount_args *ap) 1230 { 1231 struct vnode *vp; 1232 struct mount *mp; 1233 int error; 1234 1235 vp = ap->a_vp; 1236 VI_LOCK_FLAGS(vp, MTX_DUPOK); 1237 if (vp->v_writecount < 0) { 1238 error = ETXTBSY; 1239 } else { 1240 VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp, 1241 ("neg writecount increment %d", ap->a_inc)); 1242 if (vp->v_writecount == 0) { 1243 mp = vp->v_mount; 1244 if (mp != NULL && (mp->mnt_kern_flag & MNTK_NOMSYNC) == 0) 1245 vlazy(vp); 1246 } 1247 vp->v_writecount += ap->a_inc; 1248 error = 0; 1249 } 1250 VI_UNLOCK(vp); 1251 return (error); 1252 } 1253 1254 int 1255 vop_stdneed_inactive(struct vop_need_inactive_args *ap) 1256 { 1257 1258 return (1); 1259 } 1260 1261 int 1262 vop_stdioctl(struct vop_ioctl_args *ap) 1263 { 1264 struct vnode *vp; 1265 struct vattr va; 1266 off_t *offp; 1267 int error; 1268 1269 switch (ap->a_command) { 1270 case FIOSEEKDATA: 1271 case FIOSEEKHOLE: 1272 vp = ap->a_vp; 1273 error = vn_lock(vp, LK_SHARED); 1274 if (error != 0) 1275 return (EBADF); 1276 if (vp->v_type == VREG) 1277 error = 

int
vop_stdioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp;
	struct vattr va;
	off_t *offp;
	int error;

	switch (ap->a_command) {
	case FIOSEEKDATA:
	case FIOSEEKHOLE:
		vp = ap->a_vp;
		error = vn_lock(vp, LK_SHARED);
		if (error != 0)
			return (EBADF);
		if (vp->v_type == VREG)
			error = VOP_GETATTR(vp, &va, ap->a_cred);
		else
			error = ENOTTY;
		if (error == 0) {
			offp = ap->a_data;
			if (*offp < 0 || *offp >= va.va_size)
				error = ENXIO;
			else if (ap->a_command == FIOSEEKHOLE)
				*offp = va.va_size;
		}
		VOP_UNLOCK(vp);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot(struct mount *mp, int flags, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid, void *arg)
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}

int
vfs_stdnosync(struct mount *mp, int waitfor)
{

	return (0);
}

static int
vop_stdcopy_file_range(struct vop_copy_file_range_args *ap)
{
	int error;

	error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp,
	    ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags, ap->a_incred,
	    ap->a_outcred, ap->a_fsizetd);
	return (error);
}

int
vfs_stdvget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit(struct vfsconf *vfsp)
{

	return (0);
}

int
vfs_stduninit(struct vfsconf *vfsp)
{

	return (0);
}

int
vfs_stdextattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(struct mount *mp, fsctlop_t op, struct sysctl_req *req)
{

	return (EOPNOTSUPP);
}

static vop_bypass_t *
bp_by_off(struct vop_vector *vop, struct vop_generic_args *a)
{

	return (*(vop_bypass_t **)((char *)vop + a->a_desc->vdesc_vop_offset));
}

int
vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a)
{
	vop_bypass_t *bp;
	int prev_stops, rc;

	bp = bp_by_off(vop, a);
	MPASS(bp != NULL);

	prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT);
	rc = bp(a);
	sigallowstop(prev_stops);
	return (rc);
}

static int
vop_stdstat(struct vop_stat_args *a)
{
	struct vattr vattr;
	struct vattr *vap;
	struct vnode *vp;
	struct stat *sb;
	int error;
	u_short mode;

	vp = a->a_vp;
	sb = a->a_sb;

	error = vop_stat_helper_pre(a);
	if (error != 0)
		return (error);

	vap = &vattr;

	/*
	 * Initialize defaults for new and unusual fields, so that file
	 * systems which don't support these fields don't need to know
	 * about them.
	 */
	vap->va_birthtime.tv_sec = -1;
	vap->va_birthtime.tv_nsec = 0;
	vap->va_fsid = VNOVAL;
	vap->va_rdev = NODEV;

	error = VOP_GETATTR(vp, vap, a->a_active_cred);
	if (error)
		goto out;

	/*
	 * Zero the spare stat fields.
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table.
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		error = EBADF;
		goto out;
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX) {
		error = EOVERFLOW;
		goto out;
	}
	sb->st_size = vap->va_size;
	sb->st_atim.tv_sec = vap->va_atime.tv_sec;
	sb->st_atim.tv_nsec = vap->va_atime.tv_nsec;
	sb->st_mtim.tv_sec = vap->va_mtime.tv_sec;
	sb->st_mtim.tv_nsec = vap->va_mtime.tv_nsec;
	sb->st_ctim.tv_sec = vap->va_ctime.tv_sec;
	sb->st_ctim.tv_nsec = vap->va_ctime.tv_nsec;
	sb->st_birthtim.tv_sec = vap->va_birthtime.tv_sec;
	sb->st_birthtim.tv_nsec = vap->va_birthtime.tv_nsec;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
	 */

	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);
	sb->st_flags = vap->va_flags;
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_gen = vap->va_gen;
out:
	return (vop_stat_helper_post(a, error));
}