/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bmap_desc,		(vop_t *) vop_stdbmap },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getpages_desc,		(vop_t *) vop_stdgetpages },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_putpages_desc,		(vop_t *) vop_stdputpages },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Used to make a defined VOP fall back to the default VOP.
 */
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}
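/*
 * A sketch of how a filesystem typically chains to these defaults: its
 * own table names vop_defaultop as the default entry, so any VOP it does
 * not implement is re-dispatched through default_vnodeop_p above.  The
 * "myfs_*" names here are hypothetical and not part of this file.
 */
#ifdef EXAMPLE_ONLY
static int	myfs_read(struct vop_read_args *);	/* hypothetical */

vop_t **myfs_vnodeop_p;
static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
	{ &vop_default_desc,	(vop_t *) vop_defaultop },
	{ &vop_read_desc,	(vop_t *) myfs_read },
	{ NULL, NULL }
};
static struct vnodeopv_desc myfs_vnodeop_opv_desc =
	{ &myfs_vnodeop_p, myfs_vnodeop_entries };

VNODEOP_SET(myfs_vnodeop_opv_desc);
#endif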
/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement, where it exists,
 * lives in the corresponding VOP_<SOMETHING>(9) manual page (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
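/*
 * A sketch (hedged) of the per-filesystem override mentioned above: handle
 * only the names the filesystem restricts and fall through to
 * vop_stdpathconf for everything else.  "myfs_pathconf" and MYFS_LINK_MAX
 * are hypothetical.
 */
#ifdef EXAMPLE_ONLY
static int
myfs_pathconf(ap)
	struct vop_pathconf_args *ap;
{

	if (ap->a_name == _PC_LINK_MAX) {
		*ap->a_retval = MYFS_LINK_MAX;	/* hypothetical smaller limit */
		return (0);
	}
	return (vop_stdpathconf(ap));
}
#endif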
/*
 * Standard lock, unlock and islocked functions.
 *
 * These operate on the lock embedded in the vnode itself (vp->v_lock)
 * through lockmgr(9).
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock, ap->a_td));
#else
	return (debuglockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock,
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE, &vp->v_interlock,
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(&ap->a_vp->v_lock, ap->a_td));
}

/* Mark the vnode inactive: the default is simply to unlock it. */
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
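/*
 * The POLLNVAL protocol above, seen from userland (a hedged sketch; the
 * extended events such as POLLATTRIB are FreeBSD-specific):
 *
 *	struct pollfd pfd;
 *
 *	pfd.fd = fd;			(an fd on a vop_nopoll filesystem)
 *	pfd.events = POLLIN | POLLATTRIB;
 *	poll(&pfd, 1, 0);
 *
 * If pfd.revents has POLLNVAL set afterwards, the extended functionality
 * is not supported for this vnode.
 */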
/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress.  So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink).  Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them.  Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer.  Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_td));
#else
	return (debuglockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_td,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress.  So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink).  Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them.  Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer.  Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return (lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_td));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		mtx_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		mtx_unlock(&ap->a_vp->v_interlock);
	return (0);
}

/*
 * Report the vnode as never locked; there is no lock state to track.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (0);
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}
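/*
 * Caller-side sketch (hedged) of why VOP_GETWRITEMOUNT exists: the write
 * suspension code asks the vnode which mount will see the write, so that
 * a suspended filesystem can hold the writer off, roughly:
 *
 *	struct mount *mp;
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
 *		return (error);
 *	error = VOP_WRITE(vp, uio, ioflag, cred);
 *	vn_finished_write(mp);
 */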
/* Create the VM system backing object for this vnode */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	GIANT_REQUIRED;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vp->v_usecount--;
	} else {
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, td);
			tsleep(object, PVM, "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vop_stdcreatevobject: NULL object"));
	vp->v_flag |= VOBJBUF;

retn:
	return (error);
}

/* Destroy the VM system object associated with this vnode */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	if (vp->v_object == NULL)
		return (0);

	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects use zalloc
 * and thus stable storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *a_vp;
		struct vm_object **a_objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}

/*
 * vop_stdbmap:
 *
 * Minimal VOP_BMAP(9): map the logical block onto the vnode itself,
 * scaled by the filesystem I/O size in DEV_BSIZE units, and promise no
 * read-ahead (a_runp) or read-behind (a_runb) clustering.
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

/*
 * vop_stdgetpages:
 *
 * Hand the request to the generic vnode pager, which implements paging
 * in terms of the filesystem's ordinary I/O paths (see VOP_GETPAGES(9)).
 */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

/*
 * vop_stdputpages:
 *
 * Likewise, let the generic vnode pager write the pages out through the
 * filesystem's ordinary write path (see VOP_PUTPAGES(9)).
 */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}
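/*
 * Worked example for vop_stdbmap above (hedged): with an f_iosize of 8192
 * bytes, btodb(8192) is 16 DEV_BSIZE (512-byte) units, so logical block 5
 * maps to "device" block 5 * 16 == 80 -- on the vnode itself, since no
 * separate block device is named.
 */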
/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdmount (mp, path, data, ndp, td)
	struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct thread *td;
{
	return (0);
}

int
vfs_stdunmount (mp, mntflags, td)
	struct mount *mp;
	int mntflags;
	struct thread *td;
{
	return (0);
}

int
vfs_stdroot (mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{
	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync (mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

/* end of vfs default ops */
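/*
 * A sketch of how the vfs defaults get used (hedged): a filesystem fills
 * its struct vfsops with vfs_std* entries for whatever it does not
 * implement itself.  The "myfs_*" names are hypothetical, and the field
 * order shown must be checked against struct vfsops in <sys/mount.h>.
 */
#ifdef EXAMPLE_ONLY
static struct vfsops myfs_vfsops = {
	myfs_mount,		/* the filesystem's real mount routine */
	vfs_stdstart,
	myfs_unmount,
	myfs_root,
	vfs_stdquotactl,
	myfs_statfs,
	vfs_stdsync,
	vfs_stdvget,
	vfs_stdfhtovp,
	myfs_checkexp,		/* hypothetical; no default appears above */
	vfs_stdvptofh,
	vfs_stdinit,
	vfs_stduninit,
	vfs_stdextattrctl,
};
VFS_SET(myfs_vfsops, myfs, 0);
#endif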