/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_advlock =		VOP_EINVAL,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_createvobject =	vop_stdcreatevobject,
	.vop_destroyvobject =	vop_stddestroyvobject,
	.vop_fsync =		VOP_NULL,
	.vop_getpages =		vop_stdgetpages,
	.vop_getvobject =	vop_stdgetvobject,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		vop_stdinactive,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_islocked =		vop_stdislocked,
	.vop_lease =		VOP_NULL,
	.vop_lock =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
};
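
/*
 * Editor's sketch (not part of the original file): a filesystem normally
 * supplies its own vop_vector and chains to default_vnodeops through
 * vop_default, so any operation it leaves unset falls through to the
 * defaults above.  The "myfs" functions are hypothetical.
 */
#if 0
static struct vop_vector myfs_vnodeops = {
	.vop_default =	&default_vnodeops,	/* unset VOPs use the defaults */
	.vop_lookup =	myfs_lookup,		/* hypothetical */
	.vop_read =	myfs_read,		/* hypothetical */
	.vop_write =	myfs_write,		/* hypothetical */
};
#endif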

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup.
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_nostrategy:
 *
 * Strategy routine for VFS devices that have none.
 *
 * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 * routine.  Typically this is done for a BIO_READ strategy call.
 * Typically B_INVAL is assumed to already be clear prior to a write
 * and should not be cleared manually unless you just made the buffer
 * invalid.  BIO_ERROR should be cleared either way.
 */
static int
vop_nostrategy(struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}

/* Mark the vnode inactive. */
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 * Stay in sync with kern_conf.c::no_poll().
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
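
/*
 * Editor's illustration (assumed behavior, not original code): through
 * vop_nopoll a vnode always reports itself ready for the standard
 * read/write events, while a request carrying an extension bit outside
 * POLLSTANDARD (e.g. POLLATTRIB) gets POLLNVAL back, which is how a
 * client detects that the extended events are unsupported:
 */
#if 0
	struct pollfd pfd;

	pfd.fd = fd;				/* fd open on such a vnode */
	pfd.events = POLLIN;
	poll(&pfd, 1, 0);			/* revents: POLLIN, immediately */
	pfd.events = POLLIN | POLLATTRIB;	/* extended event requested */
	poll(&pfd, 1, 0);			/* revents: POLLNVAL */
#endif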

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

/* Create the VM system backing object for this vnode. */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;
	vm_ooffset_t size;

	GIANT_REQUIRED;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if (!(object->flags & OBJ_DEAD)) {
			VM_OBJECT_UNLOCK(object);
			break;
		}
		VOP_UNLOCK(vp, 0, td);
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vodead", 0);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	if (object == NULL) {
		if (vn_isdisk(vp, NULL)) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			size = IDX_TO_OFF(INT_MAX);
		} else {
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				return (error);
			size = vat.va_size;
		}

		object = vnode_pager_alloc(vp, size, 0, 0);
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		VM_OBJECT_LOCK(object);
		object->ref_count--;
		VM_OBJECT_UNLOCK(object);
		vrele(vp);
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_vflag |= VV_OBJBUF;

	return (error);
}

/* Destroy the VM system object associated with this vnode. */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	if (obj == NULL)
		return (0);
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * Don't double-terminate the object.
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_UNLOCK(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects use zalloc
 * and thus stable storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *vp;
		struct vm_object **objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}
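
/*
 * Editor's sketch (assumption, not original code): a caller that only
 * wants the cheap "might be dirty" hint described above can fetch the
 * object without taking the interlock first:
 */
#if 0
	vm_object_t obj;

	if (VOP_GETVOBJECT(vp, &obj) == 0 &&
	    (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
		/* Worth locking the vnode and flushing for real. */
	}
#endif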

/* XXX Needs good comment and VOP_BMAP(9) manpage */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
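
/*
 * Editor's note, with an assumed worked example: the mapping above
 * converts a logical block number into DEV_BSIZE (512-byte) units.
 * With f_iosize == 16384, btodb(16384) == 32, so logical block 5 yields
 * *ap->a_bnp == 160.  The zero run lengths (a_runp/a_runb) tell the
 * clustering code that no read-ahead or read-behind is available.
 */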

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int s, error = 0;
	int maxretry = 100;	/* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	s = splbio();
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}
	splx(s);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop2:
	s = splbio();
	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_vflag & VV_OBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
			splx(s);
		} else {
			bremfree(bp);
			splx(s);
			bawrite(bp);
		}
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bo = &vp->v_bufobj;
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0) {
				splx(s);
				goto loop1;
			}
			vprint("fsync: giving up on dirty", vp);
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	splx(s);

	return (error);
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot(mp, vpp, td)
	struct mount *mp;
	struct vnode **vpp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs(mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdvptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstart(mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{

	return (0);
}

int
vfs_stdquotactl(mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	struct vnode *vp, *nvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, nvp) {

		VI_LOCK(vp);
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);

		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		error = VOP_FSYNC(vp, cred, waitfor, td);
		if (error)
			allerror = error;

		VOP_UNLOCK(vp, 0, td);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync(mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{

	return (0);
}

int
vfs_stdvget(mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit(vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit(vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */
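
/*
 * Editor's sketch (hypothetical, not part of this file): a filesystem
 * with no special needs can plug the vfs_std* routines above directly
 * into its vfsops table; the "myfs" names are invented.
 */
#if 0
static struct vfsops myfs_vfsops = {
	.vfs_mount =	myfs_mount,		/* hypothetical */
	.vfs_unmount =	myfs_unmount,		/* hypothetical */
	.vfs_root =	myfs_root,		/* hypothetical */
	.vfs_statfs =	vfs_stdstatfs,
	.vfs_sync =	vfs_stdnosync,		/* or vfs_stdsync to flush vnodes */
	.vfs_vget =	vfs_stdvget,
	.vfs_fhtovp =	vfs_stdfhtovp,
	.vfs_vptofh =	vfs_stdvptofh,
	.vfs_init =	vfs_stdinit,
	.vfs_uninit =	vfs_stduninit,
};
VFS_SET(myfs_vfsops, myfs, 0);
#endif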