/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <security/audit/audit.h>
#include <sys/priv.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

static int vop_stdis_text(struct vop_is_text_args *ap);
static int vop_stdunset_text(struct vop_unset_text_args *ap);
static int vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int vop_stdcopy_file_range(struct vop_copy_file_range_args *ap);
static int vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int vop_stdgetpages_async(struct vop_getpages_async_args *ap);
static int vop_stdread_pgcache(struct vop_read_pgcache_args *ap);
static int vop_stdstat(struct vop_stat_args *ap);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() etc.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_stat =		vop_stdstat,
	.vop_fdatasync =	vop_stdfdatasync,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_need_inactive =	vop_stdneed_inactive,
	.vop_ioctl =		vop_stdioctl,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_read_pgcache =	vop_stdread_pgcache,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_add_writecount =	vop_stdadd_writecount,
	.vop_copy_file_range =	vop_stdcopy_file_range,
};
VFS_VOP_VECTOR_REGISTER(default_vnodeops);

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_eagain(struct vop_generic_args *ap)
{

	return (EAGAIN);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* check for malformed directory.. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_ASYNC_IO:
			*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
			return (0);
		case _PC_PATH_MAX:
			*ap->a_retval = PATH_MAX;
			return (0);
		case _PC_ACL_EXTENDED:
		case _PC_ACL_NFS4:
		case _PC_CAP_PRESENT:
		case _PC_INF_PRESENT:
		case _PC_MAC_PRESENT:
			*ap->a_retval = 0;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr_unlock(vp->v_vnlock));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Variants of the above set.
 *
 * Differences are:
 * - shared locking disablement is not supported
 * - v_vnlock pointer is not honored
 */
int
vop_lock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct mtx *ilk;

	MPASS(vp->v_vnlock == &vp->v_lock);

	if (__predict_false((flags & ~(LK_TYPE_MASK | LK_NODDLKTREAT | LK_RETRY)) != 0))
		goto other;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		return (lockmgr_slock(&vp->v_lock, flags, ap->a_file, ap->a_line));
	case LK_EXCLUSIVE:
		return (lockmgr_xlock(&vp->v_lock, flags, ap->a_file, ap->a_line));
	}
other:
	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(&vp->v_lock, flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

int
vop_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockmgr_unlock(&vp->v_lock));
}

int
vop_islocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockstatus(&vp->v_lock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;
	struct vnode *vp;

	/*
	 * Note that having a reference does not prevent forced unmount from
	 * setting ->v_mount to NULL after the lock gets released. This is of
	 * no consequence for typical consumers (most notably vn_start_write)
	 * since in this case the vnode is VIRF_DOOMED. Unmount might have
	 * progressed far enough that its completion is only delayed by the
	 * reference obtained here. The consumer only needs to concern itself
	 * with releasing it.
	 */
	vp = ap->a_vp;
	mp = vp->v_mount;
	if (mp == NULL) {
		*(ap->a_mpp) = NULL;
		return (0);
	}
	if (vfs_op_thread_enter(mp)) {
		if (mp == vp->v_mount) {
			vfs_mp_count_add_pcpu(mp, ref, 1);
			vfs_op_thread_exit(mp);
		} else {
			vfs_op_thread_exit(mp);
			mp = NULL;
		}
	} else {
		MNT_ILOCK(mp);
		if (mp == vp->v_mount) {
			MNT_REF(mp);
			MNT_IUNLOCK(mp);
		} else {
			MNT_IUNLOCK(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * If the file system doesn't implement VOP_BMAP, then return sensible defaults:
 * - Return the vnode's bufobj instead of any underlying device's bufobj
 * - Calculate the physical block number as if there were equal size
 *   consecutive blocks, but
 * - Report no contiguous runs of blocks.
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{

	return (vn_fsync_buf(ap->a_vp, ap->a_waitfor));
}

static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, MNT_WAIT));
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
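/*
 * Default getpages: hand the request to the generic vnode pager, which reads
 * the pages in on behalf of the filesystem.
 */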
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int *a_rbehind;
		int *a_rahead;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL);
}

static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	if (ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred;
	char *buf = ap->a_buf;
	size_t *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;
	cred = td->td_ucred;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs *sfs;
	off_t maxfilesize = 0;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > MAXPHYS)
		iosize = MAXPHYS;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
	error = VFS_STATFS(vp->v_mount, sfs, td);
	if (error == 0)
		maxfilesize = sfs->f_maxfilesize;
	free(sfs, M_STATFS);
	if (error != 0)
		goto out;
	if (maxfilesize) {
		if (offset > maxfilesize || len > maxfilesize ||
		    offset + len > maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}

int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	daddr_t startn, endn;
	off_t bstart, bend, start, end;
	int bsize, error;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		error = 0;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (VN_IS_DOOMED(vp)) {
			VOP_UNLOCK(vp);
			break;
		}

		/*
		 * Round to block boundaries (and later possibly further to
		 * page boundaries).  Applications cannot reasonably be aware
		 * of the boundaries, and the rounding must be to expand at
		 * both extremities to cover enough.  It still doesn't cover
		 * read-ahead.  For partial blocks, this gives unnecessary
		 * discarding of buffers but is efficient enough since the
		 * pages usually remain in VMIO for some time.
		 */
		bsize = vp->v_bufobj.bo_bsize;
		bstart = rounddown(ap->a_start, bsize);
		bend = roundup(ap->a_end, bsize);

		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.  Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
		 */
		if (vp->v_object != NULL) {
			start = trunc_page(bstart);
			end = round_page(bend);
			VM_OBJECT_RLOCK(vp->v_object);
			vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_RUNLOCK(vp->v_object);
		}

		bo = &vp->v_bufobj;
		BO_RLOCK(bo);
		startn = bstart / bsize;
		endn = bend / bsize;
		error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
		if (error == 0)
			error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
		BO_RUNLOCK(bo);
		VOP_UNLOCK(vp);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

	ap->a_vp->v_unpcb = ap->a_unpcb;
	return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

	*ap->a_unpcb = ap->a_vp->v_unpcb;
	return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

	ap->a_vp->v_unpcb = NULL;
	return (0);
}

static int
vop_stdis_text(struct vop_is_text_args *ap)
{

	return (ap->a_vp->v_writecount < 0);
}

int
vop_stdset_text(struct vop_set_text_args *ap)
{
	struct vnode *vp;
	struct mount *mp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	if (vp->v_writecount > 0) {
		error = ETXTBSY;
	} else {
		/*
		 * If requested by fs, keep a use reference to the
		 * vnode until the last text reference is released.
		 */
		mp = vp->v_mount;
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_TEXT_REFS) != 0 &&
		    vp->v_writecount == 0) {
			VNPASS((vp->v_iflag & VI_TEXT_REF) == 0, vp);
			vp->v_iflag |= VI_TEXT_REF;
			vrefl(vp);
		}

		vp->v_writecount--;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

static int
vop_stdunset_text(struct vop_unset_text_args *ap)
{
	struct vnode *vp;
	int error;
	bool last;

	vp = ap->a_vp;
	last = false;
	VI_LOCK(vp);
	if (vp->v_writecount < 0) {
		if ((vp->v_iflag & VI_TEXT_REF) != 0 &&
		    vp->v_writecount == -1) {
			last = true;
			vp->v_iflag &= ~VI_TEXT_REF;
		}
		vp->v_writecount++;
		error = 0;
	} else {
		error = EINVAL;
	}
	VI_UNLOCK(vp);
	if (last)
		vunref(vp);
	return (error);
}

static int
vop_stdadd_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *vp;
	struct mount *mp;
	int error;

	vp = ap->a_vp;
	VI_LOCK_FLAGS(vp, MTX_DUPOK);
	if (vp->v_writecount < 0) {
		error = ETXTBSY;
	} else {
		VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
		    ("neg writecount increment %d", ap->a_inc));
		if (vp->v_writecount == 0) {
			mp = vp->v_mount;
			if (mp != NULL && (mp->mnt_kern_flag & MNTK_NOMSYNC) == 0)
				vlazy(vp);
		}
		vp->v_writecount += ap->a_inc;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

int
vop_stdneed_inactive(struct vop_need_inactive_args *ap)
{

	return (1);
}

int
vop_stdioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp;
	struct vattr va;
	off_t *offp;
	int error;

	switch (ap->a_command) {
	case FIOSEEKDATA:
	case FIOSEEKHOLE:
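		/*
		 * The default implementation treats the whole file as data:
		 * FIOSEEKHOLE reports the hole at end-of-file (va_size) and
		 * FIOSEEKDATA leaves the offset unchanged, as long as the
		 * offset lies within the file.
		 */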
		vp = ap->a_vp;
		error = vn_lock(vp, LK_SHARED);
		if (error != 0)
			return (EBADF);
		if (vp->v_type == VREG)
			error = VOP_GETATTR(vp, &va, ap->a_cred);
		else
			error = ENOTTY;
		if (error == 0) {
			offp = ap->a_data;
			if (*offp < 0 || *offp >= va.va_size)
				error = ENXIO;
			else if (ap->a_command == FIOSEEKHOLE)
				*offp = va.va_size;
		}
		VOP_UNLOCK(vp);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, flags, vpp)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor)
	struct mount *mp;
	int waitfor;
{

	return (0);
}

static int
vop_stdcopy_file_range(struct vop_copy_file_range_args *ap)
{
	int error;

	error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp,
	    ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags, ap->a_incred,
	    ap->a_outcred, ap->a_fsizetd);
	return (error);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, flags, vpp)
	struct mount *mp;
	struct fid *fhp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return(0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

static vop_bypass_t *
bp_by_off(struct vop_vector *vop, struct vop_generic_args *a)
{

	return (*(vop_bypass_t **)((char *)vop + a->a_desc->vdesc_vop_offset));
}
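
/*
 * Call the filesystem's own implementation of the operation, located by its
 * offset in the vop_vector, with stop signals deferred for the duration of
 * the call.
 */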
int
vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a)
{
	vop_bypass_t *bp;
	int prev_stops, rc;

	bp = bp_by_off(vop, a);
	MPASS(bp != NULL);

	prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT);
	rc = bp(a);
	sigallowstop(prev_stops);
	return (rc);
}

static int
vop_stdstat(struct vop_stat_args *a)
{
	struct vattr vattr;
	struct vattr *vap;
	struct vnode *vp;
	struct stat *sb;
	int error;
	u_short mode;

	vp = a->a_vp;
	sb = a->a_sb;

	error = vop_stat_helper_pre(a);
	if (error != 0)
		return (error);

	vap = &vattr;

	/*
	 * Initialize defaults for new and unusual fields, so that file
	 * systems which don't support these fields don't need to know
	 * about them.
	 */
	vap->va_birthtime.tv_sec = -1;
	vap->va_birthtime.tv_nsec = 0;
	vap->va_fsid = VNOVAL;
	vap->va_rdev = NODEV;

	error = VOP_GETATTR(vp, vap, a->a_active_cred);
	if (error)
		goto out;

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		error = EBADF;
		goto out;
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX) {
		error = EOVERFLOW;
		goto out;
	}
	sb->st_size = vap->va_size;
	sb->st_atim.tv_sec = vap->va_atime.tv_sec;
	sb->st_atim.tv_nsec = vap->va_atime.tv_nsec;
	sb->st_mtim.tv_sec = vap->va_mtime.tv_sec;
	sb->st_mtim.tv_nsec = vap->va_mtime.tv_nsec;
	sb->st_ctim.tv_sec = vap->va_ctime.tv_sec;
	sb->st_ctim.tv_nsec = vap->va_ctime.tv_nsec;
	sb->st_birthtim.tv_sec = vap->va_birthtime.tv_sec;
	sb->st_birthtim.tv_nsec = vap->va_birthtime.tv_nsec;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
	 */

	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);
	sb->st_flags = vap->va_flags;
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_gen = vap->va_gen;
out:
	return (vop_stat_helper_post(a, error));
}

static int
vop_stdread_pgcache(struct vop_read_pgcache_args *ap __unused)
{
	return (EJUSTRETURN);
}