/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

static int vop_nostrategy __P((struct vop_strategy_args *));

/*
 * This vnode table supplies the operations we fall back on when a
 * filesystem does not implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bwrite_desc,		(vop_t *) vop_stdbwrite },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
        { &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
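
/*
 * Illustrative sketch (hypothetical "myfs", not part of this file): a
 * filesystem picks up these defaults by routing its own default entry
 * through vop_defaultop, so any VOP it leaves out falls through to the
 * table above:
 *
 *	static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
 *		{ &vop_default_desc,	(vop_t *) vop_defaultop },
 *		{ &vop_read_desc,	(vop_t *) myfs_read },
 *		{ &vop_write_desc,	(vop_t *) myfs_write },
 *		{ NULL, NULL }
 *	};
 */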

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

int
vop_panic(struct vop_generic_args *ap)
{

	printf("vop_panic[%s]\n", ap->a_desc->vdesc_name);
	panic("Filesystem goof");
	return (0);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  This is typically done for a BIO_READ strategy call.
 *	For a write, B_INVAL is assumed to already be clear and should not
 *	be cleared manually unless you just made the buffer invalid.
 *	BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy(struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}
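
/*
 * Hypothetical caller sketch (not part of this file): per the comment
 * above, a read pushed through a strategy routine would typically clear
 * the error and invalid flags first, roughly:
 *
 *	bp->b_ioflags &= ~BIO_ERROR;
 *	bp->b_flags &= ~B_INVAL;
 *	bp->b_iocmd = BIO_READ;
 *	VOP_STRATEGY(vp, bp);
 */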

int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}
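
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * this is what ends up servicing a pathconf(2) query when a filesystem
 * points vop_pathconf at vop_stdpathconf, roughly:
 *
 *	int value;
 *
 *	error = VOP_PATHCONF(vp, _PC_LINK_MAX, &value);
 */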

/*
 * Standard lock, unlock and islocked functions.
 *
 * These operate on the lock structure embedded in the vnode itself
 * (vp->v_lock), not on a lock hung off v_data.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock,
	    ap->a_p, "vop_stdlock", vp->filename, vp->line));
#endif
}

int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE, &vp->v_interlock,
	    ap->a_p));
}

int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{

	return (lockstatus(&ap->a_vp->v_lock, ap->a_p));
}
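
/*
 * Hypothetical usage sketch (not part of this file): a filesystem that
 * wires these into its op vector gets real lockmgr semantics, so callers
 * can use the usual pattern:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
 *	...
 *	VOP_UNLOCK(vp, 0, p);
 */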

int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	if ((ap->a_events & ~POLLSTANDARD) == 0)
		return (ap->a_events & (POLLRDNORM|POLLWRNORM));
	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
}

int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
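		/*
		 * FALLTHROUGH: without DEBUG_VFS_LOCKS, an exclusive
		 * request is granted as a shared lock.
		 */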
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_p,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return (lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		mtx_exit(&ap->a_vp->v_interlock, MTX_DEF);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		mtx_exit(&ap->a_vp->v_interlock, MTX_DEF);
	return (0);
}
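
/*
 * Hypothetical caller sketch (not part of this file): a caller holding
 * the interlock can pass LK_INTERLOCK and rely on vop_nolock and
 * vop_nounlock to drop it, roughly:
 *
 *	mtx_enter(&vp->v_interlock, MTX_DEF);
 *	...
 *	VOP_LOCK(vp, LK_INTERLOCK | LK_SHARED, p);
 */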

/*
 * Return the lock status; with no locking done, the node is never locked.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{

	return (0);
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vp->v_usecount--;
	} else {
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, p);
			tsleep(object, PVM, "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vop_stdcreatevobject: NULL object"));
	vp->v_flag |= VOBJBUF;

retn:
	return (error);
}

int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	if (vp->v_object == NULL)
		return (0);

	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice. The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 */
		vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}

int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *a_vp;
		struct vm_object **a_objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdmount(mp, path, data, ndp, p)
	struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	return (0);
}

int
vfs_stdunmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdroot(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs(mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart(mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdquotactl(mp, cmds, uid, arg, p)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	return (0);
}

int
vfs_stdvget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdcheckexp(mp, nam, extflagsp, credanonp)
	struct mount *mp;
	struct sockaddr *nam;
	int *extflagsp;
	struct ucred **credanonp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit(vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit(vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stdextattrctl(mp, cmd, attrname, arg, p)
	struct mount *mp;
	int cmd;
	const char *attrname;
	caddr_t arg;
	struct proc *p;
{
	return (EOPNOTSUPP);
}
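
/*
 * Illustrative sketch (hypothetical "myfs", not part of this file): a
 * filesystem fills the vfsops slots it does not care about with the
 * defaults above, e.g.:
 *
 *	static struct vfsops myfs_vfsops = {
 *		myfs_mount,
 *		vfs_stdstart,
 *		myfs_unmount,
 *		myfs_root,
 *		vfs_stdquotactl,
 *		myfs_statfs,
 *		vfs_stdsync,
 *		vfs_stdvget,
 *		vfs_stdfhtovp,
 *		vfs_stdcheckexp,
 *		vfs_stdvptofh,
 *		vfs_stdinit,
 *		vfs_stduninit,
 *		vfs_stdextattrctl,
 *	};
 */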

/* end of vfs default ops */