/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <security/audit/audit.h>
#include <sys/priv.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
		    char *dirbuf, int dirbuflen, off_t *off, char **cpos,
		    int *len, int *eofflag, struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
		    struct thread *td);

#define	DIRENT_MINSIZE	(sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

static int	vop_stdis_text(struct vop_is_text_args *ap);
static int	vop_stdunset_text(struct vop_unset_text_args *ap);
static int	vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int	vop_stdcopy_file_range(struct vop_copy_file_range_args *ap);
static int	vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int	vop_stdgetpages_async(struct vop_getpages_async_args *ap);
static int	vop_stdread_pgcache(struct vop_read_pgcache_args *ap);
static int	vop_stdstat(struct vop_stat_args *ap);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() etc.
 */
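
/*
 * Illustrative sketch (not part of this file; all "examplefs" names are
 * hypothetical): a filesystem normally points .vop_default at
 * default_vnodeops so that any VOP it does not implement falls through
 * to the table below.  Note the access/accessx rule from the comment
 * above: at least one of the two must be a real implementation.
 */
#if 0
static struct vop_vector examplefs_vnodeops = {
	.vop_default =		&default_vnodeops,
	.vop_access =		examplefs_access,	/* breaks the recursion */
	.vop_lookup =		examplefs_lookup,
	.vop_read =		examplefs_read,
	.vop_write =		examplefs_write,
};
VFS_VOP_VECTOR_REGISTER(examplefs_vnodeops);
#endif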

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_stat =		vop_stdstat,
	.vop_fdatasync =	vop_stdfdatasync,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_need_inactive =	vop_stdneed_inactive,
	.vop_ioctl =		vop_stdioctl,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_read_pgcache =	vop_stdread_pgcache,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_add_writecount =	vop_stdadd_writecount,
	.vop_copy_file_range =	vop_stdcopy_file_range,
};
VFS_VOP_VECTOR_REGISTER(default_vnodeops);

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup.
 */
static int
vop_nolookup(struct vop_lookup_args *ap)
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement a rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 * vop_nostrategy:
 *
 * Strategy routine for VFS devices that have none.
 *
 * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 * routine.  Typically this is done for a BIO_READ strategy call.
 * Typically B_INVAL is assumed to already be clear prior to a write
 * and should not be cleared manually unless you just made the buffer
 * invalid.  BIO_ERROR should be cleared either way.
 */
static int
vop_nostrategy(struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
    int dirbuflen, off_t *off, char **cpos, int *len,
    int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* Check for a malformed directory entry. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}
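
/*
 * Usage sketch (added for illustration): callers prime *off and *len to
 * zero and then pull entries in a loop until the buffer drains and the
 * directory reports EOF, e.g.:
 *
 *	off = 0;
 *	len = 0;
 *	do {
 *		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen,
 *		    &off, &cpos, &len, &eofflag, td);
 *		if (error)
 *			break;
 *		... inspect *dp ...
 *	} while (len > 0 || !eofflag);
 *
 * dirent_exists() below follows exactly this pattern.
 */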

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}
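
/*
 * Illustrative sketch (hypothetical "examplefs" names): a filesystem with
 * tighter limits typically answers the pathconf names it knows about and
 * defers everything else to vop_stdpathconf() below.
 */
#if 0
static int
examplefs_pathconf(struct vop_pathconf_args *ap)
{

	switch (ap->a_name) {
	case _PC_NAME_MAX:
		*ap->a_retval = 255;	/* this filesystem's own limit */
		return (0);
	case _PC_LINK_MAX:
		*ap->a_retval = 1;	/* e.g. no hard links */
		return (0);
	default:
		return (vop_stdpathconf(ap));
	}
}
#endif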

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(struct vop_pathconf_args *ap)
{

	switch (ap->a_name) {
	case _PC_ASYNC_IO:
		*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_ACL_EXTENDED:
	case _PC_ACL_NFS4:
	case _PC_CAP_PRESENT:
	case _PC_INF_PRESENT:
	case _PC_MAC_PRESENT:
		*ap->a_retval = 0;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr_unlock(vp->v_vnlock));
}

/* See above. */
int
vop_stdislocked(struct vop_islocked_args *ap)
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Variants of the above set.
 *
 * Differences are:
 * - shared locking disablement is not supported
 * - v_vnlock pointer is not honored
 */
int
vop_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct mtx *ilk;

	MPASS(vp->v_vnlock == &vp->v_lock);

	if (__predict_false((flags & ~(LK_TYPE_MASK | LK_NODDLKTREAT |
	    LK_RETRY)) != 0))
		goto other;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		return (lockmgr_slock(&vp->v_lock, flags, ap->a_file,
		    ap->a_line));
	case LK_EXCLUSIVE:
		return (lockmgr_xlock(&vp->v_lock, flags, ap->a_file,
		    ap->a_line));
	}
other:
	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(&vp->v_lock, flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

int
vop_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockmgr_unlock(&vp->v_lock));
}

int
vop_islocked(struct vop_islocked_args *ap)
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockstatus(&vp->v_lock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(struct vop_poll_args *ap)
{

	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(struct vop_poll_args *ap)
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(struct vop_getwritemount_args *ap)
{
	struct mount *mp;
	struct vnode *vp;

	/*
	 * Note that having a reference does not prevent forced unmount from
	 * setting ->v_mount to NULL after the lock gets released.  This is of
	 * no consequence for typical consumers (most notably vn_start_write)
	 * since in this case the vnode is VIRF_DOOMED.  Unmount might have
	 * progressed far enough that its completion is only delayed by the
	 * reference obtained here.  The consumer only needs to concern itself
	 * with releasing it.
	 */
	vp = ap->a_vp;
	mp = vp->v_mount;
	if (mp == NULL) {
		*(ap->a_mpp) = NULL;
		return (0);
	}
	if (vfs_op_thread_enter(mp)) {
		if (mp == vp->v_mount) {
			vfs_mp_count_add_pcpu(mp, ref, 1);
			vfs_op_thread_exit(mp);
		} else {
			vfs_op_thread_exit(mp);
			mp = NULL;
		}
	} else {
		MNT_ILOCK(mp);
		if (mp == vp->v_mount) {
			MNT_REF(mp);
			MNT_IUNLOCK(mp);
		} else {
			MNT_IUNLOCK(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * If the file system doesn't implement VOP_BMAP, then return sensible defaults:
 * - Return the vnode's bufobj instead of any underlying device's bufobj
 * - Calculate the physical block number as if there were equal size
 *   consecutive blocks, but
 * - Report no contiguous runs of blocks.
 */
int
vop_stdbmap(struct vop_bmap_args *ap)
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	/*
	 * Logical block bn maps to bn * (f_iosize / DEV_BSIZE) physical
	 * (DEV_BSIZE) units; btodb() performs the byte-to-block conversion.
	 */
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn *
		    btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

int
vop_stdfsync(struct vop_fsync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, ap->a_waitfor));
}

static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, MNT_WAIT));
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(struct vop_getpages_args *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL));
}

static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	if (ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return (vfs_kqfilter(ap));
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(struct vop_putpages_args *ap)
{

	return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals));
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	size_t *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 &&
			    dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs *sfs;
	off_t maxfilesize = 0;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > MAXPHYS)
		iosize = MAXPHYS;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
	error = VFS_STATFS(vp->v_mount, sfs, td);
	if (error == 0)
		maxfilesize = sfs->f_maxfilesize;
	free(sfs, M_STATFS);
	if (error != 0)
		goto out;
	if (maxfilesize) {
		if (offset > maxfilesize || len > maxfilesize ||
		    offset + len > maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
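		/*
		 * Worked example (added for clarity): with iosize 4096,
		 * offset 6144 and len 10000, the first pass computes
		 * cur = 4096 - (6144 % 4096) = 2048, realigning offset to
		 * the next block boundary; subsequent passes then proceed
		 * in full 4096-byte blocks until len is exhausted.
		 */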
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}

int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	daddr_t startn, endn;
	off_t bstart, bend, start, end;
	int bsize, error;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		error = 0;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (VN_IS_DOOMED(vp)) {
			VOP_UNLOCK(vp);
			break;
		}

		/*
		 * Round to block boundaries (and later possibly further to
		 * page boundaries).  Applications cannot reasonably be aware
		 * of the boundaries, and the rounding must be to expand at
		 * both extremities to cover enough.  It still doesn't cover
		 * read-ahead.  For partial blocks, this gives unnecessary
		 * discarding of buffers but is efficient enough since the
		 * pages usually remain in VMIO for some time.
		 */
		bsize = vp->v_bufobj.bo_bsize;
		bstart = rounddown(ap->a_start, bsize);
		bend = roundup(ap->a_end, bsize);

		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.  Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
		 */
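		/*
		 * Worked example (added for clarity): with bo_bsize 32768,
		 * a_start 1000 and a_end 50000, bstart and bend above become
		 * 0 and 65536; with 4 KB pages the noreuse range below then
		 * covers page indices 0 through 15.
		 */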
1114 */ 1115 if (vp->v_object != NULL) { 1116 start = trunc_page(bstart); 1117 end = round_page(bend); 1118 VM_OBJECT_RLOCK(vp->v_object); 1119 vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start), 1120 OFF_TO_IDX(end)); 1121 VM_OBJECT_RUNLOCK(vp->v_object); 1122 } 1123 1124 bo = &vp->v_bufobj; 1125 BO_RLOCK(bo); 1126 startn = bstart / bsize; 1127 endn = bend / bsize; 1128 error = bnoreuselist(&bo->bo_clean, bo, startn, endn); 1129 if (error == 0) 1130 error = bnoreuselist(&bo->bo_dirty, bo, startn, endn); 1131 BO_RUNLOCK(bo); 1132 VOP_UNLOCK(vp); 1133 break; 1134 default: 1135 error = EINVAL; 1136 break; 1137 } 1138 return (error); 1139 } 1140 1141 int 1142 vop_stdunp_bind(struct vop_unp_bind_args *ap) 1143 { 1144 1145 ap->a_vp->v_unpcb = ap->a_unpcb; 1146 return (0); 1147 } 1148 1149 int 1150 vop_stdunp_connect(struct vop_unp_connect_args *ap) 1151 { 1152 1153 *ap->a_unpcb = ap->a_vp->v_unpcb; 1154 return (0); 1155 } 1156 1157 int 1158 vop_stdunp_detach(struct vop_unp_detach_args *ap) 1159 { 1160 1161 ap->a_vp->v_unpcb = NULL; 1162 return (0); 1163 } 1164 1165 static int 1166 vop_stdis_text(struct vop_is_text_args *ap) 1167 { 1168 1169 return (ap->a_vp->v_writecount < 0); 1170 } 1171 1172 int 1173 vop_stdset_text(struct vop_set_text_args *ap) 1174 { 1175 struct vnode *vp; 1176 struct mount *mp; 1177 int error; 1178 1179 vp = ap->a_vp; 1180 VI_LOCK(vp); 1181 if (vp->v_writecount > 0) { 1182 error = ETXTBSY; 1183 } else { 1184 /* 1185 * If requested by fs, keep a use reference to the 1186 * vnode until the last text reference is released. 1187 */ 1188 mp = vp->v_mount; 1189 if (mp != NULL && (mp->mnt_kern_flag & MNTK_TEXT_REFS) != 0 && 1190 vp->v_writecount == 0) { 1191 VNPASS((vp->v_iflag & VI_TEXT_REF) == 0, vp); 1192 vp->v_iflag |= VI_TEXT_REF; 1193 vrefl(vp); 1194 } 1195 1196 vp->v_writecount--; 1197 error = 0; 1198 } 1199 VI_UNLOCK(vp); 1200 return (error); 1201 } 1202 1203 static int 1204 vop_stdunset_text(struct vop_unset_text_args *ap) 1205 { 1206 struct vnode *vp; 1207 int error; 1208 bool last; 1209 1210 vp = ap->a_vp; 1211 last = false; 1212 VI_LOCK(vp); 1213 if (vp->v_writecount < 0) { 1214 if ((vp->v_iflag & VI_TEXT_REF) != 0 && 1215 vp->v_writecount == -1) { 1216 last = true; 1217 vp->v_iflag &= ~VI_TEXT_REF; 1218 } 1219 vp->v_writecount++; 1220 error = 0; 1221 } else { 1222 error = EINVAL; 1223 } 1224 VI_UNLOCK(vp); 1225 if (last) 1226 vunref(vp); 1227 return (error); 1228 } 1229 1230 static int 1231 vop_stdadd_writecount(struct vop_add_writecount_args *ap) 1232 { 1233 struct vnode *vp; 1234 struct mount *mp; 1235 int error; 1236 1237 vp = ap->a_vp; 1238 VI_LOCK_FLAGS(vp, MTX_DUPOK); 1239 if (vp->v_writecount < 0) { 1240 error = ETXTBSY; 1241 } else { 1242 VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp, 1243 ("neg writecount increment %d", ap->a_inc)); 1244 if (vp->v_writecount == 0) { 1245 mp = vp->v_mount; 1246 if (mp != NULL && (mp->mnt_kern_flag & MNTK_NOMSYNC) == 0) 1247 vlazy(vp); 1248 } 1249 vp->v_writecount += ap->a_inc; 1250 error = 0; 1251 } 1252 VI_UNLOCK(vp); 1253 return (error); 1254 } 1255 1256 int 1257 vop_stdneed_inactive(struct vop_need_inactive_args *ap) 1258 { 1259 1260 return (1); 1261 } 1262 1263 int 1264 vop_stdioctl(struct vop_ioctl_args *ap) 1265 { 1266 struct vnode *vp; 1267 struct vattr va; 1268 off_t *offp; 1269 int error; 1270 1271 switch (ap->a_command) { 1272 case FIOSEEKDATA: 1273 case FIOSEEKHOLE: 1274 vp = ap->a_vp; 1275 error = vn_lock(vp, LK_SHARED); 1276 if (error != 0) 1277 return (EBADF); 1278 if (vp->v_type == VREG) 1279 error = 
		if (vp->v_type == VREG)
			error = VOP_GETATTR(vp, &va, ap->a_cred);
		else
			error = ENOTTY;
		if (error == 0) {
			offp = ap->a_data;
			if (*offp < 0 || *offp >= va.va_size)
				error = ENXIO;
			else if (ap->a_command == FIOSEEKHOLE)
				*offp = va.va_size;
		}
		VOP_UNLOCK(vp);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot(struct mount *mp, int flags, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid, void *arg)
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}

int
vfs_stdnosync(struct mount *mp, int waitfor)
{

	return (0);
}

static int
vop_stdcopy_file_range(struct vop_copy_file_range_args *ap)
{
	int error;

	error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp,
	    ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags, ap->a_incred,
	    ap->a_outcred, ap->a_fsizetd);
	return (error);
}

int
vfs_stdvget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit(struct vfsconf *vfsp)
{

	return (0);
}

int
vfs_stduninit(struct vfsconf *vfsp)
{

	return (0);
}

int
vfs_stdextattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(struct mount *mp, fsctlop_t op, struct sysctl_req *req)
{

	return (EOPNOTSUPP);
}

static vop_bypass_t *
bp_by_off(struct vop_vector *vop, struct vop_generic_args *a)
{

	return (*(vop_bypass_t **)((char *)vop + a->a_desc->vdesc_vop_offset));
}
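
/*
 * Note (added for clarity): vdesc_vop_offset is the byte offset of the
 * operation's slot within struct vop_vector, so for a VOP_READ call
 * bp_by_off() simply fetches vop->vop_read.  vop_sigdefer() below uses
 * this to locate the implementation it wraps.
 */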

int
vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a)
{
	vop_bypass_t *bp;
	int prev_stops, rc;

	bp = bp_by_off(vop, a);
	MPASS(bp != NULL);

	prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT);
	rc = bp(a);
	sigallowstop(prev_stops);
	return (rc);
}

static int
vop_stdstat(struct vop_stat_args *a)
{
	struct vattr vattr;
	struct vattr *vap;
	struct vnode *vp;
	struct stat *sb;
	int error;
	u_short mode;

	vp = a->a_vp;
	sb = a->a_sb;

	error = vop_stat_helper_pre(a);
	if (error != 0)
		return (error);

	vap = &vattr;

	/*
	 * Initialize defaults for new and unusual fields, so that file
	 * systems which don't support these fields don't need to know
	 * about them.
	 */
	vap->va_birthtime.tv_sec = -1;
	vap->va_birthtime.tv_nsec = 0;
	vap->va_fsid = VNOVAL;
	vap->va_rdev = NODEV;

	error = VOP_GETATTR(vp, vap, a->a_active_cred);
	if (error)
		goto out;

	/*
	 * Zero the spare stat fields.
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table.
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		error = EBADF;
		goto out;
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX) {
		error = EOVERFLOW;
		goto out;
	}
	sb->st_size = vap->va_size;
	sb->st_atim.tv_sec = vap->va_atime.tv_sec;
	sb->st_atim.tv_nsec = vap->va_atime.tv_nsec;
	sb->st_mtim.tv_sec = vap->va_mtime.tv_sec;
	sb->st_mtim.tv_nsec = vap->va_mtime.tv_nsec;
	sb->st_ctim.tv_sec = vap->va_ctime.tv_sec;
	sb->st_ctim.tv_nsec = vap->va_ctime.tv_nsec;
	sb->st_birthtim.tv_sec = vap->va_birthtime.tv_sec;
	sb->st_birthtim.tv_nsec = vap->va_birthtime.tv_nsec;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file".
	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
	 */
	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);
	sb->st_flags = vap->va_flags;
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_gen = vap->va_gen;
out:
	return (vop_stat_helper_post(a, error));
}

static int
vop_stdread_pgcache(struct vop_read_pgcache_args *ap __unused)
{
	return (EJUSTRETURN);
}
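
/*
 * Note (added for clarity): EJUSTRETURN here is the conventional "cannot
 * satisfy the read from the page cache" answer; the caller is then
 * expected to fall back to a regular, vnode-locked VOP_READ.
 */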