/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <security/audit/audit.h>
#include <sys/priv.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
		    char *dirbuf, int dirbuflen, off_t *off,
		    char **cpos, int *len, int *eofflag,
		    struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
		    struct thread *td);

#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

static int vop_stdis_text(struct vop_is_text_args *ap);
static int vop_stdunset_text(struct vop_unset_text_args *ap);
static int vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int vop_stdcopy_file_range(struct vop_copy_file_range_args *ap);
static int vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int vop_stdgetpages_async(struct vop_getpages_async_args *ap);
static int vop_stdread_pgcache(struct vop_read_pgcache_args *ap);
static int vop_stdstat(struct vop_stat_args *ap);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() etc.
 */
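
/*
 * Illustrative sketch (hypothetical "foofs" names): a filesystem normally
 * fills in only the operations it implements and points vop_default at
 * this table, so every other VOP falls back to the defaults below:
 *
 *	static struct vop_vector foofs_vnodeops = {
 *		.vop_default = &default_vnodeops,
 *		.vop_lookup = foofs_lookup,
 *		.vop_read = foofs_read,
 *		.vop_reclaim = foofs_reclaim,
 *	};
 *	VFS_VOP_VECTOR_REGISTER(foofs_vnodeops);
 */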

struct vop_vector default_vnodeops = {
	.vop_default = NULL,
	.vop_bypass = VOP_EOPNOTSUPP,

	.vop_access = vop_stdaccess,
	.vop_accessx = vop_stdaccessx,
	.vop_advise = vop_stdadvise,
	.vop_advlock = vop_stdadvlock,
	.vop_advlockasync = vop_stdadvlockasync,
	.vop_advlockpurge = vop_stdadvlockpurge,
	.vop_allocate = vop_stdallocate,
	.vop_bmap = vop_stdbmap,
	.vop_close = VOP_NULL,
	.vop_fsync = VOP_NULL,
	.vop_stat = vop_stdstat,
	.vop_fdatasync = vop_stdfdatasync,
	.vop_getpages = vop_stdgetpages,
	.vop_getpages_async = vop_stdgetpages_async,
	.vop_getwritemount = vop_stdgetwritemount,
	.vop_inactive = VOP_NULL,
	.vop_need_inactive = vop_stdneed_inactive,
	.vop_ioctl = vop_stdioctl,
	.vop_kqfilter = vop_stdkqfilter,
	.vop_islocked = vop_stdislocked,
	.vop_lock1 = vop_stdlock,
	.vop_lookup = vop_nolookup,
	.vop_open = VOP_NULL,
	.vop_pathconf = VOP_EINVAL,
	.vop_poll = vop_nopoll,
	.vop_putpages = vop_stdputpages,
	.vop_readlink = VOP_EINVAL,
	.vop_read_pgcache = vop_stdread_pgcache,
	.vop_rename = vop_norename,
	.vop_revoke = VOP_PANIC,
	.vop_strategy = vop_nostrategy,
	.vop_unlock = vop_stdunlock,
	.vop_vptocnp = vop_stdvptocnp,
	.vop_vptofh = vop_stdvptofh,
	.vop_unp_bind = vop_stdunp_bind,
	.vop_unp_connect = vop_stdunp_connect,
	.vop_unp_detach = vop_stdunp_detach,
	.vop_is_text = vop_stdis_text,
	.vop_set_text = vop_stdset_text,
	.vop_unset_text = vop_stdunset_text,
	.vop_add_writecount = vop_stdadd_writecount,
	.vop_copy_file_range = vop_stdcopy_file_range,
};
VFS_VOP_VECTOR_REGISTER(default_vnodeops);

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_eagain(struct vop_generic_args *ap)
{

	return (EAGAIN);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 * vop_nostrategy:
 *
 * Strategy routine for VFS devices that have none.
 *
 * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 * routine.  Typically this is done for a BIO_READ strategy call.
 * Typically B_INVAL is assumed to already be clear prior to a write
 * and should not be cleared manually unless you just made the buffer
 * invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
    int dirbuflen, off_t *off, char **cpos, int *len,
    int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* check for malformed directory.. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}
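
/*
 * Usage sketch for get_next_dirent() (illustrative; both callers below,
 * dirent_exists() and vop_stdvptocnp(), follow this pattern): start with
 * off = 0 and len = 0 and keep iterating until the buffer is drained and
 * the directory reports EOF:
 *
 *	off = 0;
 *	len = 0;
 *	do {
 *		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
 *		    &cpos, &len, &eofflag, td);
 *		if (error)
 *			break;
 *		(inspect *dp here)
 *	} while (len > 0 || !eofflag);
 */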

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
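
/*
 * Illustrative sketch (hypothetical "foofs" names): a filesystem that only
 * has a few limits of its own typically answers those _PC_* names itself
 * and defers everything else to vop_stdpathconf():
 *
 *	static int
 *	foofs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *		switch (ap->a_name) {
 *		case _PC_NAME_MAX:
 *			*ap->a_retval = FOOFS_MAXNAMLEN;
 *			return (0);
 *		default:
 *			return (vop_stdpathconf(ap));
 *		}
 *	}
 */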
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_ASYNC_IO:
		*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_ACL_EXTENDED:
	case _PC_ACL_NFS4:
	case _PC_CAP_PRESENT:
	case _PC_INF_PRESENT:
	case _PC_MAC_PRESENT:
		*ap->a_retval = 0;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr_unlock(vp->v_vnlock));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Variants of the above set.
 *
 * Differences are:
 * - shared locking disablement is not supported
 * - v_vnlock pointer is not honored
 */
int
vop_lock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct mtx *ilk;

	MPASS(vp->v_vnlock == &vp->v_lock);

	if (__predict_false((flags & ~(LK_TYPE_MASK | LK_NODDLKTREAT | LK_RETRY)) != 0))
		goto other;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		return (lockmgr_slock(&vp->v_lock, flags, ap->a_file, ap->a_line));
	case LK_EXCLUSIVE:
		return (lockmgr_xlock(&vp->v_lock, flags, ap->a_file, ap->a_line));
	}
other:
	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(&vp->v_lock, flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

int
vop_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockmgr_unlock(&vp->v_lock));
}

int
vop_islocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockstatus(&vp->v_lock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
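
/*
 * Note (editorial): unlike vop_nopoll() above, which rejects events outside
 * POLLSTANDARD with POLLNVAL, vop_stdpoll() below hands such requests to
 * vn_pollrecord() so the polling thread can be woken up later.  A local
 * filesystem opting into this behaviour simply lists it in its vop_vector,
 * e.g. ".vop_poll = vop_stdpoll".
 */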
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;
	struct vnode *vp;

	/*
	 * Note that having a reference does not prevent forced unmount from
	 * setting ->v_mount to NULL after the lock gets released. This is of
	 * no consequence for typical consumers (most notably vn_start_write)
	 * since in this case the vnode is VIRF_DOOMED. Unmount might have
	 * progressed far enough that its completion is only delayed by the
	 * reference obtained here. The consumer only needs to concern itself
	 * with releasing it.
	 */
	vp = ap->a_vp;
	mp = vp->v_mount;
	if (mp == NULL) {
		*(ap->a_mpp) = NULL;
		return (0);
	}
	if (vfs_op_thread_enter(mp)) {
		if (mp == vp->v_mount) {
			vfs_mp_count_add_pcpu(mp, ref, 1);
			vfs_op_thread_exit(mp);
		} else {
			vfs_op_thread_exit(mp);
			mp = NULL;
		}
	} else {
		MNT_ILOCK(mp);
		if (mp == vp->v_mount) {
			MNT_REF(mp);
			MNT_IUNLOCK(mp);
		} else {
			MNT_IUNLOCK(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * If the file system doesn't implement VOP_BMAP, then return sensible defaults:
 * - Return the vnode's bufobj instead of any underlying device's bufobj
 * - Calculate the physical block number as if there were equal size
 *   consecutive blocks, but
 * - Report no contiguous runs of blocks.
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
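
/*
 * Worked example for the mapping above (illustrative; assumes the usual
 * DEV_BSIZE of 512): with an f_iosize of 32768 bytes, btodb(32768) == 64,
 * so logical block 10 maps to *a_bnp == 10 * 64 == 640.  Blocks are simply
 * assumed to be laid out back to back in units of the filesystem I/O size,
 * which is why no run lengths are reported.
 */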

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{

	return (vn_fsync_buf(ap->a_vp, ap->a_waitfor));
}

static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, MNT_WAIT));
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int *a_rbehind;
		int *a_rahead;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL);
}

static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	if (ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	size_t *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 &&
			    dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs *sfs;
	off_t maxfilesize = 0;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > MAXPHYS)
		iosize = MAXPHYS;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
	error = VFS_STATFS(vp->v_mount, sfs, td);
	if (error == 0)
		maxfilesize = sfs->f_maxfilesize;
	free(sfs, M_STATFS);
	if (error != 0)
		goto out;
	if (maxfilesize) {
		if (offset > maxfilesize || len > maxfilesize ||
		    offset + len > maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}

int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	daddr_t startn, endn;
	off_t bstart, bend, start, end;
	int bsize, error;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		error = 0;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (VN_IS_DOOMED(vp)) {
			VOP_UNLOCK(vp);
			break;
		}

		/*
		 * Round to block boundaries (and later possibly further to
		 * page boundaries).  Applications cannot reasonably be aware
		 * of the boundaries, and the rounding must be to expand at
		 * both extremities to cover enough.  It still doesn't cover
		 * read-ahead.  For partial blocks, this gives unnecessary
		 * discarding of buffers but is efficient enough since the
		 * pages usually remain in VMIO for some time.
		 */
		bsize = vp->v_bufobj.bo_bsize;
		bstart = rounddown(ap->a_start, bsize);
		bend = roundup(ap->a_end, bsize);

		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.  Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
		 */
		if (vp->v_object != NULL) {
			start = trunc_page(bstart);
			end = round_page(bend);
			VM_OBJECT_RLOCK(vp->v_object);
			vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_RUNLOCK(vp->v_object);
		}

		bo = &vp->v_bufobj;
		BO_RLOCK(bo);
		startn = bstart / bsize;
		endn = bend / bsize;
		error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
		if (error == 0)
			error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
		BO_RUNLOCK(bo);
		VOP_UNLOCK(vp);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

	ap->a_vp->v_unpcb = ap->a_unpcb;
	return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

	*ap->a_unpcb = ap->a_vp->v_unpcb;
	return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

	ap->a_vp->v_unpcb = NULL;
	return (0);
}

static int
vop_stdis_text(struct vop_is_text_args *ap)
{

	return (ap->a_vp->v_writecount < 0);
}

int
vop_stdset_text(struct vop_set_text_args *ap)
{
	struct vnode *vp;
	struct mount *mp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	if (vp->v_writecount > 0) {
		error = ETXTBSY;
	} else {
		/*
		 * If requested by fs, keep a use reference to the
		 * vnode until the last text reference is released.
		 */
		mp = vp->v_mount;
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_TEXT_REFS) != 0 &&
		    vp->v_writecount == 0) {
			VNPASS((vp->v_iflag & VI_TEXT_REF) == 0, vp);
			vp->v_iflag |= VI_TEXT_REF;
			vrefl(vp);
		}

		vp->v_writecount--;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

static int
vop_stdunset_text(struct vop_unset_text_args *ap)
{
	struct vnode *vp;
	int error;
	bool last;

	vp = ap->a_vp;
	last = false;
	VI_LOCK(vp);
	if (vp->v_writecount < 0) {
		if ((vp->v_iflag & VI_TEXT_REF) != 0 &&
		    vp->v_writecount == -1) {
			last = true;
			vp->v_iflag &= ~VI_TEXT_REF;
		}
		vp->v_writecount++;
		error = 0;
	} else {
		error = EINVAL;
	}
	VI_UNLOCK(vp);
	if (last)
		vunref(vp);
	return (error);
}

static int
vop_stdadd_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *vp;
	struct mount *mp;
	int error;

	vp = ap->a_vp;
	VI_LOCK_FLAGS(vp, MTX_DUPOK);
	if (vp->v_writecount < 0) {
		error = ETXTBSY;
	} else {
		VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
		    ("neg writecount increment %d", ap->a_inc));
		if (vp->v_writecount == 0) {
			mp = vp->v_mount;
			if (mp != NULL && (mp->mnt_kern_flag & MNTK_NOMSYNC) == 0)
				vlazy(vp);
		}
		vp->v_writecount += ap->a_inc;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

int
vop_stdneed_inactive(struct vop_need_inactive_args *ap)
{

	return (1);
}
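
/*
 * The default ioctl handler below implements only FIOSEEKDATA and
 * FIOSEEKHOLE, treating a regular file as one contiguous data region:
 * FIOSEEKDATA leaves the offset unchanged (as long as it lies within the
 * file) and FIOSEEKHOLE reports the "hole" at va_size.  Every other
 * command gets ENOTTY.
 */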
int
vop_stdioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp;
	struct vattr va;
	off_t *offp;
	int error;

	switch (ap->a_command) {
	case FIOSEEKDATA:
	case FIOSEEKHOLE:
		vp = ap->a_vp;
		error = vn_lock(vp, LK_SHARED);
		if (error != 0)
			return (EBADF);
		if (vp->v_type == VREG)
			error = VOP_GETATTR(vp, &va, ap->a_cred);
		else
			error = ENOTTY;
		if (error == 0) {
			offp = ap->a_data;
			if (*offp < 0 || *offp >= va.va_size)
				error = ENXIO;
			else if (ap->a_command == FIOSEEKHOLE)
				*offp = va.va_size;
		}
		VOP_UNLOCK(vp);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, flags, vpp)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor)
	struct mount *mp;
	int waitfor;
{

	return (0);
}

static int
vop_stdcopy_file_range(struct vop_copy_file_range_args *ap)
{
	int error;

	error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp,
	    ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags, ap->a_incred,
	    ap->a_outcred, ap->a_fsizetd);
	return (error);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, flags, vpp)
	struct mount *mp;
	struct fid *fhp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return(0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

static vop_bypass_t *
bp_by_off(struct vop_vector *vop, struct vop_generic_args *a)
{

	return (*(vop_bypass_t **)((char *)vop + a->a_desc->vdesc_vop_offset));
}

int
vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a)
{
	vop_bypass_t *bp;
	int prev_stops, rc;

	bp = bp_by_off(vop, a);
	MPASS(bp != NULL);

	prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT);
	rc = bp(a);
	sigallowstop(prev_stops);
	return (rc);
}

static int
vop_stdstat(struct vop_stat_args *a)
{
	struct vattr vattr;
	struct vattr *vap;
	struct vnode *vp;
	struct stat *sb;
	int error;
	u_short mode;

	vp = a->a_vp;
	sb = a->a_sb;

	error = vop_stat_helper_pre(a);
	if (error != 0)
		return (error);

	vap = &vattr;

	/*
	 * Initialize defaults for new and unusual fields, so that file
	 * systems which don't support these fields don't need to know
	 * about them.
	 */
	vap->va_birthtime.tv_sec = -1;
	vap->va_birthtime.tv_nsec = 0;
	vap->va_fsid = VNOVAL;
	vap->va_rdev = NODEV;

	error = VOP_GETATTR(vp, vap, a->a_active_cred);
	if (error)
		goto out;

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		error = EBADF;
		goto out;
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX) {
		error = EOVERFLOW;
		goto out;
	}
	sb->st_size = vap->va_size;
	sb->st_atim.tv_sec = vap->va_atime.tv_sec;
	sb->st_atim.tv_nsec = vap->va_atime.tv_nsec;
	sb->st_mtim.tv_sec = vap->va_mtime.tv_sec;
	sb->st_mtim.tv_nsec = vap->va_mtime.tv_nsec;
	sb->st_ctim.tv_sec = vap->va_ctime.tv_sec;
	sb->st_ctim.tv_nsec = vap->va_ctime.tv_nsec;
	sb->st_birthtim.tv_sec = vap->va_birthtime.tv_sec;
	sb->st_birthtim.tv_nsec = vap->va_birthtime.tv_nsec;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
	 */

	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);
	sb->st_flags = vap->va_flags;
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_gen = vap->va_gen;
out:
	return (vop_stat_helper_post(a, error));
}

static int
vop_stdread_pgcache(struct vop_read_pgcache_args *ap __unused)
{
	return (EJUSTRETURN);
}