/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

static int	vop_nolookup __P((struct vop_lookup_args *));
static int	vop_nostrategy __P((struct vop_strategy_args *));

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bwrite_desc,		(vop_t *) vop_stdbwrite },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
        { &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);

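/*
 * A filesystem picks up these defaults by listing vop_defaultop (defined
 * below) as its own vop_default entry.  Purely illustrative sketch; foofs
 * and foofs_read are hypothetical and not part of this file:
 *
 *	static struct vnodeopv_entry_desc foofs_vnodeop_entries[] = {
 *		{ &vop_default_desc,	(vop_t *) vop_defaultop },
 *		{ &vop_read_desc,	(vop_t *) foofs_read },
 *		{ NULL, NULL }
 *	};
 *
 * Any operation foofs does not list then resolves through the table above.
 */
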
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

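/*
 * Pass the call through to the default operations vector declared above.
 * Filesystems use this as their vop_default entry so that any operation
 * they do not implement themselves falls back to these defaults.
 */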
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

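/*
 * Minimal pathconf(2) support: report the system-wide limits for the
 * variables that have them and return EINVAL for anything else.
 */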
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 *
 * These operate on the lock embedded in the vnode (vp->v_lock) via the
 * lock manager.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock,
	    ap->a_p, "vop_stdlock", vp->filename, vp->line));
#endif
}

int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE, &vp->v_interlock,
	    ap->a_p));
}

int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{

	return (lockstatus(&ap->a_vp->v_lock, ap->a_p));
}

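/*
 * Default inactive routine: nothing to clean up, just release the vnode
 * lock the caller handed us.
 */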
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	if ((ap->a_events & ~POLLSTANDARD) == 0)
		return (ap->a_events & (POLLRDNORM|POLLWRNORM));
	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
}

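/*
 * Default bwrite: hand the buffer straight to the generic buffer-cache
 * write routine.
 */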
int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_p,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return(lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		mtx_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		mtx_unlock(&ap->a_vp->v_interlock);
	return (0);
}

/*
 * Report the vnode as unlocked; these stubs do no locking.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{

	return (0);
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

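/*
 * Create a VM object backing the vnode so its pages can be mapped and
 * cached.  Regular files and directories are sized from their attributes;
 * disk devices get the largest object we can express.
 */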
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;
		struct proc *p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vp->v_usecount--;
	} else {
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, p);
			tsleep(object, PVM, "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_flag |= VOBJBUF;

retn:
	return (error);
}

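/*
 * Tear down the VM object associated with the vnode: terminate it outright
 * once the last reference is gone, otherwise let the pager drop its
 * reference.
 */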
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	if (vp->v_object == NULL)
		return (0);

	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice. The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 */
		vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}

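/*
 * Hand back the vnode's VM object, if it has one.
 */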
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *vp;
		struct vm_object **objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
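/*
 * For example (illustrative only; foofs is hypothetical), a filesystem with
 * nothing special to do at start or sync time can point the vfs_start and
 * vfs_sync members of its vfsops at vfs_stdstart and vfs_stdsync below,
 * while supplying its own mount and unmount routines.
 */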
int
vfs_stdmount (mp, path, data, ndp, p)
	struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	return (0);
}

int
vfs_stdunmount (mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdroot (mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, p)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync (mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	return (0);
}

int
vfs_stdvget (mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdcheckexp (mp, nam, extflagsp, credanonp)
	struct mount *mp;
	struct sockaddr *nam;
	int *extflagsp;
	struct ucred **credanonp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{
	return(0);
}

int
vfs_stdextattrctl(mp, cmd, attrname, arg, p)
	struct mount *mp;
	int cmd;
	const char *attrname;
	caddr_t arg;
	struct proc *p;
{
	return(EOPNOTSUPP);
}

/* end of vfs default ops */