/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)
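
/*
 * A note on the macro above (explanatory sketch): struct dirent ends in a
 * d_name[MAXNAMLEN + 1] array, so subtracting that array and adding back
 * 4 bytes leaves the fixed-size header plus the smallest name storage a
 * well-formed entry can carry.  get_next_dirent() below rejects any record
 * with
 *
 *	dp->d_reclen < DIRENT_MINSIZE
 *
 * as coming from a malformed directory.
 */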

static int	vop_stdis_text(struct vop_is_text_args *ap);
static int	vop_stdunset_text(struct vop_unset_text_args *ap);
static int	vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int	vop_stdcopy_file_range(struct vop_copy_file_range_args *ap);
static int	vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int	vop_stdgetpages_async(struct vop_getpages_async_args *ap);
static int	vop_stdioctl(struct vop_ioctl_args *ap);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in an immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() etc.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_fdatasync =	vop_stdfdatasync,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		vop_stdioctl,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_add_writecount =	vop_stdadd_writecount,
	.vop_copy_file_range =	vop_stdcopy_file_range,
};
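
/*
 * Typical usage (illustrative sketch; the myfs_* names are hypothetical):
 * a filesystem points .vop_default at this table so that every operation
 * it leaves unset falls back to the defaults above, and ultimately to
 * vop_bypass (VOP_EOPNOTSUPP) when no default exists either:
 *
 *	static struct vop_vector myfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	myfs_lookup,
 *		.vop_access =	myfs_access,
 *		.vop_strategy =	myfs_strategy,
 *	};
 *
 * Per the warning above, at least one of vop_access/vop_accessx must be a
 * real implementation rather than inherited from this table.
 */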

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup.
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement a rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* Check for a malformed directory. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}
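
/*
 * Canonical consumption loop for get_next_dirent() (illustrative sketch,
 * mirroring its two callers below): get_next_dirent() refills dirbuf via
 * VOP_READDIR whenever *len reaches zero and otherwise hands out the next
 * record from the buffer:
 *
 *	off = 0;
 *	len = 0;
 *	do {
 *		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
 *		    &cpos, &len, &eofflag, td);
 *		if (error)
 *			break;
 *		(examine *dp)
 *	} while (len > 0 || !eofflag);
 */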

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}
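
/*
 * Worked example for the SEEK_END handling above (illustrative): a request
 * such as
 *
 *	struct flock fl = { .l_start = -16, .l_len = 16,
 *	    .l_whence = SEEK_END, .l_type = F_WRLCK };
 *
 * can only be turned into an absolute byte range once the current file
 * size is known, so va_size is fetched for SEEK_END; for SEEK_SET and
 * SEEK_CUR the lock manager does not need it and zero is passed instead.
 */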

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_ASYNC_IO:
		*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_ACL_EXTENDED:
	case _PC_ACL_NFS4:
	case _PC_CAP_PRESENT:
	case _PC_INF_PRESENT:
	case _PC_MAC_PRESENT:
		*ap->a_retval = 0;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_lock_fast_path(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_unlock_fast_path(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case, our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * If the file system doesn't implement VOP_BMAP, then return sensible defaults:
 * - Return the vnode's bufobj instead of any underlying device's bufobj
 * - Calculate the physical block number as if there were equal size
 *   consecutive blocks, but
 * - Report no contiguous runs of blocks.
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
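
/*
 * Worked example for vop_stdbmap() (illustrative): with an f_iosize of
 * 32768 and 512-byte disk blocks, btodb(32768) == 64, so logical block 3
 * is reported as "physical" block 192 on the vnode's own bufobj, which is
 * consistent with equal-size consecutive blocks; the zero *a_runp and
 * *a_runb advertise no contiguous runs around it.
 */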

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{

	return (vn_fsync_buf(ap->a_vp, ap->a_waitfor));
}

static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, MNT_WAIT));
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int *a_rbehind;
		int *a_rahead;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL);
}

static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}
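
/*
 * Note that the default "async" getpages above is in fact synchronous:
 * it runs VOP_GETPAGES to completion and only then fires the completion
 * callback.  The callback has the shape (hypothetical myfs_* name):
 *
 *	static void
 *	myfs_getpages_done(void *arg, vm_page_t *m, int count, int error)
 *	{
 *		(handle pages and error)
 *	}
 *
 * A filesystem wanting real asynchrony must override vop_getpages_async.
 */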

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}
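
/*
 * Worked example for vop_stdvptocnp() (illustrative): the name is placed
 * at the tail of the caller's buffer so that callers can keep prepending
 * path components in front of it.  With *buflen == 20 and a matching
 * entry "src" (d_namlen == 3), i becomes 17, the bytes land in
 * buf[17..19], and *buflen is updated to 17 on success.
 */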

int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs *sfs;
	off_t maxfilesize = 0;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > MAXPHYS)
		iosize = MAXPHYS;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
	error = VFS_STATFS(vp->v_mount, sfs, td);
	if (error == 0)
		maxfilesize = sfs->f_maxfilesize;
	free(sfs, M_STATFS);
	if (error != 0)
		goto out;
	if (maxfilesize) {
		if (offset > maxfilesize || len > maxfilesize ||
		    offset + len > maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}
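
/*
 * Worked example for the chunking in vop_stdallocate() (illustrative):
 * with iosize == 65536, offset == 1000 and len == 200000, the first pass
 * uses cur = 65536 - 1000 = 64536 to reach the next block boundary,
 * subsequent passes use full 65536-byte chunks, and the last pass is
 * clamped to the remaining len.  Each pass preserves existing data by
 * reading it back in and zero-fills only what the read did not cover.
 */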

int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	daddr_t startn, endn;
	off_t bstart, bend, start, end;
	int bsize, error;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		error = 0;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (vp->v_iflag & VI_DOOMED) {
			VOP_UNLOCK(vp, 0);
			break;
		}

		/*
		 * Round to block boundaries (and later possibly further to
		 * page boundaries).  Applications cannot reasonably be aware
		 * of the boundaries, and the rounding must be to expand at
		 * both extremities to cover enough.  It still doesn't cover
		 * read-ahead.  For partial blocks, this gives unnecessary
		 * discarding of buffers but is efficient enough since the
		 * pages usually remain in VMIO for some time.
		 */
		bsize = vp->v_bufobj.bo_bsize;
		bstart = rounddown(ap->a_start, bsize);
		bend = roundup(ap->a_end, bsize);

		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.  Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
		 */
		if (vp->v_object != NULL) {
			start = trunc_page(bstart);
			end = round_page(bend);
			VM_OBJECT_RLOCK(vp->v_object);
			vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_RUNLOCK(vp->v_object);
		}

		bo = &vp->v_bufobj;
		BO_RLOCK(bo);
		startn = bstart / bsize;
		endn = bend / bsize;
		error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
		if (error == 0)
			error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
		BO_RUNLOCK(bo);
		VOP_UNLOCK(vp, 0);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

	ap->a_vp->v_unpcb = ap->a_unpcb;
	return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

	*ap->a_unpcb = ap->a_vp->v_unpcb;
	return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

	ap->a_vp->v_unpcb = NULL;
	return (0);
}

static int
vop_stdis_text(struct vop_is_text_args *ap)
{

	return (ap->a_vp->v_writecount < 0);
}
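
/*
 * Note on the sign convention used above and below: v_writecount doubles
 * as the text-reference counter by going negative.  A value of -3, for
 * instance, means three executable mappings and no writers; in that state
 * vop_stdadd_writecount() refuses new writers with ETXTBSY, while
 * vop_stdset_text() and vop_stdunset_text() step the counter further down
 * or back up one text reference at a time.
 */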

int
vop_stdset_text(struct vop_set_text_args *ap)
{
	struct vnode *vp;
	struct mount *mp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	if (vp->v_writecount > 0) {
		error = ETXTBSY;
	} else {
		/*
		 * If requested by fs, keep a use reference to the
		 * vnode until the last text reference is released.
		 */
		mp = vp->v_mount;
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_TEXT_REFS) != 0 &&
		    vp->v_writecount == 0) {
			vp->v_iflag |= VI_TEXT_REF;
			vrefl(vp);
		}

		vp->v_writecount--;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

static int
vop_stdunset_text(struct vop_unset_text_args *ap)
{
	struct vnode *vp;
	int error;
	bool last;

	vp = ap->a_vp;
	last = false;
	VI_LOCK(vp);
	if (vp->v_writecount < 0) {
		if ((vp->v_iflag & VI_TEXT_REF) != 0 &&
		    vp->v_writecount == -1) {
			last = true;
			vp->v_iflag &= ~VI_TEXT_REF;
		}
		vp->v_writecount++;
		error = 0;
	} else {
		error = EINVAL;
	}
	VI_UNLOCK(vp);
	if (last)
		vunref(vp);
	return (error);
}

static int
vop_stdadd_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *vp;
	int error;

	vp = ap->a_vp;
	VI_LOCK_FLAGS(vp, MTX_DUPOK);
	if (vp->v_writecount < 0) {
		error = ETXTBSY;
	} else {
		VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
		    ("neg writecount increment %d", ap->a_inc));
		vp->v_writecount += ap->a_inc;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

static int
vop_stdioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp;
	struct vattr va;
	off_t *offp;
	int error;

	switch (ap->a_command) {
	case FIOSEEKDATA:
	case FIOSEEKHOLE:
		vp = ap->a_vp;
		error = vn_lock(vp, LK_SHARED);
		if (error != 0)
			return (EBADF);
		if (vp->v_type == VREG)
			error = VOP_GETATTR(vp, &va, ap->a_cred);
		else
			error = ENOTTY;
		if (error == 0) {
			offp = ap->a_data;
			if (*offp < 0 || *offp >= va.va_size)
				error = ENXIO;
			else if (ap->a_command == FIOSEEKHOLE)
				*offp = va.va_size;
		}
		VOP_UNLOCK(vp, 0);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
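
/*
 * Worked example for the FIOSEEKDATA/FIOSEEKHOLE defaults above
 * (illustrative): the whole file is modeled as one data region followed
 * by the implicit hole at EOF.  On a 1000-byte regular file, FIOSEEKDATA
 * with *offp == 10 leaves *offp at 10, FIOSEEKHOLE with *offp == 10 sets
 * *offp to 1000, and either command with *offp negative or at or beyond
 * 1000 fails with ENXIO.
 */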
1244 */ 1245 loop: 1246 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 1247 if (vp->v_bufobj.bo_dirty.bv_cnt == 0) { 1248 VI_UNLOCK(vp); 1249 continue; 1250 } 1251 if ((error = vget(vp, lockreq, td)) != 0) { 1252 if (error == ENOENT) { 1253 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 1254 goto loop; 1255 } 1256 continue; 1257 } 1258 error = VOP_FSYNC(vp, waitfor, td); 1259 if (error) 1260 allerror = error; 1261 vput(vp); 1262 } 1263 return (allerror); 1264 } 1265 1266 int 1267 vfs_stdnosync (mp, waitfor) 1268 struct mount *mp; 1269 int waitfor; 1270 { 1271 1272 return (0); 1273 } 1274 1275 static int 1276 vop_stdcopy_file_range(struct vop_copy_file_range_args *ap) 1277 { 1278 int error; 1279 1280 error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp, 1281 ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags, ap->a_incred, 1282 ap->a_outcred, ap->a_fsizetd); 1283 return (error); 1284 } 1285 1286 int 1287 vfs_stdvget (mp, ino, flags, vpp) 1288 struct mount *mp; 1289 ino_t ino; 1290 int flags; 1291 struct vnode **vpp; 1292 { 1293 1294 return (EOPNOTSUPP); 1295 } 1296 1297 int 1298 vfs_stdfhtovp (mp, fhp, flags, vpp) 1299 struct mount *mp; 1300 struct fid *fhp; 1301 int flags; 1302 struct vnode **vpp; 1303 { 1304 1305 return (EOPNOTSUPP); 1306 } 1307 1308 int 1309 vfs_stdinit (vfsp) 1310 struct vfsconf *vfsp; 1311 { 1312 1313 return (0); 1314 } 1315 1316 int 1317 vfs_stduninit (vfsp) 1318 struct vfsconf *vfsp; 1319 { 1320 1321 return(0); 1322 } 1323 1324 int 1325 vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname) 1326 struct mount *mp; 1327 int cmd; 1328 struct vnode *filename_vp; 1329 int attrnamespace; 1330 const char *attrname; 1331 { 1332 1333 if (filename_vp != NULL) 1334 VOP_UNLOCK(filename_vp, 0); 1335 return (EOPNOTSUPP); 1336 } 1337 1338 int 1339 vfs_stdsysctl(mp, op, req) 1340 struct mount *mp; 1341 fsctlop_t op; 1342 struct sysctl_req *req; 1343 { 1344 1345 return (EOPNOTSUPP); 1346 } 1347 1348 static vop_bypass_t * 1349 bp_by_off(struct vop_vector *vop, struct vop_generic_args *a) 1350 { 1351 1352 return (*(vop_bypass_t **)((char *)vop + a->a_desc->vdesc_vop_offset)); 1353 } 1354 1355 int 1356 vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a) 1357 { 1358 vop_bypass_t *bp; 1359 int prev_stops, rc; 1360 1361 for (; vop != NULL; vop = vop->vop_default) { 1362 bp = bp_by_off(vop, a); 1363 if (bp != NULL) 1364 break; 1365 1366 /* 1367 * Bypass is not really supported. It is done for 1368 * fallback to unimplemented vops in the default 1369 * vector. 1370 */ 1371 bp = vop->vop_bypass; 1372 if (bp != NULL) 1373 break; 1374 } 1375 MPASS(bp != NULL); 1376 1377 prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); 1378 rc = bp(a); 1379 sigallowstop(prev_stops); 1380 return (rc); 1381 } 1382