/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
			char *dirbuf, int dirbuflen, off_t *off,
			char **cpos, int *len, int *eofflag,
			struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			struct thread *td);

#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

static int vop_stdis_text(struct vop_is_text_args *ap);
static int vop_stdunset_text(struct vop_unset_text_args *ap);
static int vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int vop_stdcopy_file_range(struct vop_copy_file_range_args *ap);
static int vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int vop_stdgetpages_async(struct vop_getpages_async_args *ap);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() etc.
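 *
 * As an illustration (hypothetical filesystem, not part of this file),
 * a filesystem typically chains to these defaults by pointing its
 * vop_default at this table and overriding only the operations it
 * actually implements:
 *
 *	struct vop_vector myfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_access =	myfs_access,	(one of vop_access or
 *						 vop_accessx, per above)
 *		.vop_lookup =	myfs_lookup,
 *	};
 *	VFS_VOP_VECTOR_REGISTER(myfs_vnodeops);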
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_fdatasync =	vop_stdfdatasync,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_need_inactive =	vop_stdneed_inactive,
	.vop_ioctl =		vop_stdioctl,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_add_writecount =	vop_stdadd_writecount,
	.vop_copy_file_range =	vop_stdcopy_file_range,
};
VFS_VOP_VECTOR_REGISTER(default_vnodeops);

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement a rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 * vop_nostrategy:
 *
 * Strategy routine for VFS devices that have none.
 *
 * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 * routine.  Typically this is done for a BIO_READ strategy call.
 * Typically B_INVAL is assumed to already be clear prior to a write
 * and should not be cleared manually unless you just made the buffer
 * invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
    int dirbuflen, off_t *off, char **cpos, int *len,
    int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* check for malformed directory.. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
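 * Returns 1 if the entry is found and 0 otherwise; errors encountered
 * while reading the directory are treated as "not found".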
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
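 *
 * A sketch of the usual override pattern (myfs_pathconf is hypothetical):
 * handle the names the filesystem constrains and fall through to this
 * function for everything else:
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *		switch (ap->a_name) {
 *		case _PC_NAME_MAX:
 *			*ap->a_retval = 255;
 *			return (0);
 *		default:
 *			return (vop_stdpathconf(ap));
 *		}
 *	}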
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_ASYNC_IO:
		*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_ACL_EXTENDED:
	case _PC_ACL_NFS4:
	case _PC_CAP_PRESENT:
	case _PC_INF_PRESENT:
	case _PC_MAC_PRESENT:
		*ap->a_retval = 0;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr_unlock(vp->v_vnlock));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Variants of the above set.
 *
 * Differences are:
 * - shared locking disablement is not supported
 * - v_vnlock pointer is not honored
 */
int
vop_lock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct mtx *ilk;

	MPASS(vp->v_vnlock == &vp->v_lock);

	if (__predict_false((flags & ~(LK_TYPE_MASK | LK_NODDLKTREAT |
	    LK_RETRY)) != 0))
		goto other;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		return (lockmgr_slock(&vp->v_lock, flags, ap->a_file,
		    ap->a_line));
	case LK_EXCLUSIVE:
		return (lockmgr_xlock(&vp->v_lock, flags, ap->a_file,
		    ap->a_line));
	}
other:
	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(&vp->v_lock, flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

int
vop_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockmgr_unlock(&vp->v_lock));
}

int
vop_islocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockstatus(&vp->v_lock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
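 *
 * Requests for events outside POLLSTANDARD are recorded against the
 * vnode with vn_pollrecord() so the poller can be woken up later; the
 * standard read/write events are simply reported back as ready, as in
 * vop_nopoll() above.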
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;
	struct vnode *vp;

	/*
	 * Note that having a reference does not prevent forced unmount from
	 * setting ->v_mount to NULL after the lock gets released.  This is of
	 * no consequence for typical consumers (most notably vn_start_write)
	 * since in this case the vnode is VIRF_DOOMED.  Unmount might have
	 * progressed far enough that its completion is only delayed by the
	 * reference obtained here.  The consumer only needs to concern itself
	 * with releasing it.
	 */
	vp = ap->a_vp;
	mp = vp->v_mount;
	if (mp == NULL) {
		*(ap->a_mpp) = NULL;
		return (0);
	}
	if (vfs_op_thread_enter(mp)) {
		if (mp == vp->v_mount) {
			vfs_mp_count_add_pcpu(mp, ref, 1);
			vfs_op_thread_exit(mp);
		} else {
			vfs_op_thread_exit(mp);
			mp = NULL;
		}
	} else {
		MNT_ILOCK(mp);
		if (mp == vp->v_mount) {
			MNT_REF(mp);
			MNT_IUNLOCK(mp);
		} else {
			MNT_IUNLOCK(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * If the file system doesn't implement VOP_BMAP, then return sensible defaults:
 * - Return the vnode's bufobj instead of any underlying device's bufobj
 * - Calculate the physical block number as if there were equal size
 *   consecutive blocks, but
 * - Report no contiguous runs of blocks.
 * For example, with an f_iosize of 32768, logical block bn maps to the
 * DEV_BSIZE-unit block number bn * btodb(32768) = bn * 64.
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{

	return (vn_fsync_buf(ap->a_vp, ap->a_waitfor));
}

static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, MNT_WAIT));
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
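/*
 * Rather than failing with EOPNOTSUPP, the getpages/putpages defaults
 * below hand the request to the generic vnode pager, so filesystems
 * that go through the buffer cache get working paging without any
 * pager glue of their own.
 */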
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int *a_rbehind;
		int *a_rahead;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL);
}

static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	if (ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	size_t *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 &&
			    dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs *sfs;
	off_t maxfilesize = 0;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > MAXPHYS)
		iosize = MAXPHYS;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
	error = VFS_STATFS(vp->v_mount, sfs, td);
	if (error == 0)
		maxfilesize = sfs->f_maxfilesize;
	free(sfs, M_STATFS);
	if (error != 0)
		goto out;
	if (maxfilesize) {
		if (offset > maxfilesize || len > maxfilesize ||
		    offset + len > maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
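		 *
		 * Each pass covers at most one blocksize-aligned chunk:
		 * cur starts at iosize, is trimmed so the transfer ends
		 * on an iosize boundary (e.g. with an iosize of 4096 and
		 * an offset of 1000, the first pass covers 3096 bytes),
		 * and is then capped by the remaining length.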
		 */
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}

int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	daddr_t startn, endn;
	off_t bstart, bend, start, end;
	int bsize, error;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		error = 0;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (VN_IS_DOOMED(vp)) {
			VOP_UNLOCK(vp);
			break;
		}

		/*
		 * Round to block boundaries (and later possibly further to
		 * page boundaries).  Applications cannot reasonably be aware
		 * of the boundaries, and the rounding must be to expand at
		 * both extremities to cover enough.  It still doesn't cover
		 * read-ahead.  For partial blocks, this gives unnecessary
		 * discarding of buffers but is efficient enough since the
		 * pages usually remain in VMIO for some time.
		 */
		bsize = vp->v_bufobj.bo_bsize;
		bstart = rounddown(ap->a_start, bsize);
		bend = roundup(ap->a_end, bsize);

		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.  Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
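		 *
		 * For example, with a bo_bsize of 32768, a request for
		 * the range [5000, 70000) is expanded to [0, 98304),
		 * which trunc_page()/round_page() leave unchanged here.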
		 */
		if (vp->v_object != NULL) {
			start = trunc_page(bstart);
			end = round_page(bend);
			VM_OBJECT_RLOCK(vp->v_object);
			vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_RUNLOCK(vp->v_object);
		}

		bo = &vp->v_bufobj;
		BO_RLOCK(bo);
		startn = bstart / bsize;
		endn = bend / bsize;
		error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
		if (error == 0)
			error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
		BO_RUNLOCK(bo);
		VOP_UNLOCK(vp);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

	ap->a_vp->v_unpcb = ap->a_unpcb;
	return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

	*ap->a_unpcb = ap->a_vp->v_unpcb;
	return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

	ap->a_vp->v_unpcb = NULL;
	return (0);
}

static int
vop_stdis_text(struct vop_is_text_args *ap)
{

	return (ap->a_vp->v_writecount < 0);
}

int
vop_stdset_text(struct vop_set_text_args *ap)
{
	struct vnode *vp;
	struct mount *mp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	if (vp->v_writecount > 0) {
		error = ETXTBSY;
	} else {
		/*
		 * If requested by fs, keep a use reference to the
		 * vnode until the last text reference is released.
		 */
		mp = vp->v_mount;
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_TEXT_REFS) != 0 &&
		    vp->v_writecount == 0) {
			vp->v_iflag |= VI_TEXT_REF;
			vrefl(vp);
		}

		vp->v_writecount--;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

static int
vop_stdunset_text(struct vop_unset_text_args *ap)
{
	struct vnode *vp;
	int error;
	bool last;

	vp = ap->a_vp;
	last = false;
	VI_LOCK(vp);
	if (vp->v_writecount < 0) {
		if ((vp->v_iflag & VI_TEXT_REF) != 0 &&
		    vp->v_writecount == -1) {
			last = true;
			vp->v_iflag &= ~VI_TEXT_REF;
		}
		vp->v_writecount++;
		error = 0;
	} else {
		error = EINVAL;
	}
	VI_UNLOCK(vp);
	if (last)
		vunref(vp);
	return (error);
}

static int
vop_stdadd_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *vp;
	struct mount *mp;
	int error;

	vp = ap->a_vp;
	VI_LOCK_FLAGS(vp, MTX_DUPOK);
	if (vp->v_writecount < 0) {
		error = ETXTBSY;
	} else {
		VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
		    ("neg writecount increment %d", ap->a_inc));
		if (vp->v_writecount == 0) {
			mp = vp->v_mount;
			if (mp != NULL && (mp->mnt_kern_flag & MNTK_NOMSYNC) == 0)
				vlazy(vp);
		}
		vp->v_writecount += ap->a_inc;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

int
vop_stdneed_inactive(struct vop_need_inactive_args *ap)
{

	return (1);
}

int
vop_stdioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp;
	struct vattr va;
	off_t *offp;
	int error;

	switch (ap->a_command) {
	case FIOSEEKDATA:
	case FIOSEEKHOLE:
		vp = ap->a_vp;
		error = vn_lock(vp, LK_SHARED);
		if (error != 0)
			return (EBADF);
		if (vp->v_type == VREG)
			error = VOP_GETATTR(vp, &va, ap->a_cred);
		else
			error = ENOTTY;
		if (error == 0) {
			offp = ap->a_data;
			if (*offp < 0 || *offp >= va.va_size)
				error = ENXIO;
			else if (ap->a_command == FIOSEEKHOLE)
				*offp = va.va_size;
		}
		VOP_UNLOCK(vp);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, flags, vpp)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq, td)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor)
	struct mount *mp;
	int waitfor;
{

	return (0);
}

static int
vop_stdcopy_file_range(struct vop_copy_file_range_args *ap)
{
	int error;

	error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp,
	    ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags, ap->a_incred,
	    ap->a_outcred, ap->a_fsizetd);
	return (error);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, flags, vpp)
	struct mount *mp;
	struct fid *fhp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

static vop_bypass_t *
bp_by_off(struct vop_vector *vop, struct vop_generic_args *a)
{

	return (*(vop_bypass_t **)((char *)vop + a->a_desc->vdesc_vop_offset));
}

int
vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a)
{
	vop_bypass_t *bp;
	int prev_stops, rc;

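	/*
	 * Look up the filesystem's own implementation of this VOP and
	 * call it with stop signals deferred, so the operation is not
	 * interrupted part-way by a stop.
	 */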
	bp = bp_by_off(vop, a);
	MPASS(bp != NULL);

	prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT);
	rc = bp(a);
	sigallowstop(prev_stops);
	return (rc);
}