xref: /freebsd/sys/kern/vfs_default.c (revision f0adf7f5cdd241db2f2c817683191a6ef64a4e95)
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);

/*
 * This vnode table supplies the fallback implementation used when a
 * filesystem does not implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bmap_desc,		(vop_t *) vop_stdbmap },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getpages_desc,		(vop_t *) vop_stdgetpages },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_stdlock },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_putpages_desc,		(vop_t *) vop_stdputpages },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_specstrategy_desc,	(vop_t *) vop_panic },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_stdunlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
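
/*
 * Illustrative sketch (not part of this file): a filesystem that only
 * implements a handful of operations can point everything else at this
 * default table through vop_defaultop.  The myfs_* names are
 * hypothetical.
 *
 *	static vop_t **myfs_vnodeop_p;
 *	static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
 *		{ &vop_default_desc,	(vop_t *) vop_defaultop },
 *		{ &vop_lookup_desc,	(vop_t *) myfs_lookup },
 *		{ &vop_read_desc,	(vop_t *) myfs_read },
 *		{ NULL, NULL }
 *	};
 *	static struct vnodeopv_desc myfs_vnodeop_opv_desc =
 *		{ &myfs_vnodeop_p, myfs_vnodeop_entries };
 *	VNODEOP_SET(myfs_vnodeop_opv_desc);
 */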

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Used to make a defined VOP fall back to the default VOP.
 */
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}
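
/*
 * Illustrative sketch (hypothetical myfs_ioctl, not part of this file):
 * a filesystem can handle one case of a VOP itself and punt everything
 * else to the default table through vop_defaultop:
 *
 *	static int
 *	myfs_ioctl(struct vop_ioctl_args *ap)
 *	{
 *
 *		if (ap->a_command == MYFS_PRIVATE_IOCTL)
 *			return (myfs_private_ioctl(ap));
 *		return (vop_defaultop((struct vop_generic_args *)ap));
 *	}
 *
 * The cast is safe because every vop_*_args structure starts with the
 * vnodeop_desc pointer that vop_defaultop dispatches on.
 */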

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need a "reasonable default" implementation for a
 * particular operation.
 *
 * Documentation for these operations, where it exists, is found in the
 * corresponding VOP_<SOMETHING>(9) manual page (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  This is typically done for a BIO_READ strategy call.
 *	B_INVAL is usually assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy(struct vop_strategy_args *ap)
{

	KASSERT(ap->a_vp == ap->a_bp->b_vp, ("%s(%p != %p)",
	    __func__, ap->a_vp, ap->a_bp->b_vp));
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	vprint("device vnode", ap->a_bp->b_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}
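
/*
 * Illustrative sketch (assumed caller, not part of this file): the
 * protocol described above means a read path clears the buffer's error
 * and invalid state before handing it to the strategy routine, roughly:
 *
 *	bp->b_iocmd = BIO_READ;
 *	bp->b_flags &= ~B_INVAL;
 *	bp->b_ioflags &= ~BIO_ERROR;
 *	VOP_STRATEGY(vp, bp);
 *	error = bufwait(bp);
 */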

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, returning information about
 * system-wide limits.  Filesystems with smaller limits must override the
 * relevant values per filesystem.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
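
/*
 * Illustrative sketch (hypothetical myfs_pathconf, not part of this
 * file): a filesystem with a smaller link limit overrides just that
 * name and falls back to vop_stdpathconf for the rest:
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		if (ap->a_name == _PC_LINK_MAX) {
 *			*ap->a_retval = MYFS_LINK_MAX;
 *			return (0);
 *		}
 *		return (vop_stdpathconf(ap));
 *	}
 */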

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}

/* Mark the vnode inactive */
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 * Stay in sync with kern_conf.c::no_poll().
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
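
/*
 * Illustrative sketch (not part of this file): a local filesystem that
 * supports poll wires its table entry to vop_stdpoll and, when state
 * changes, notifies the pollers recorded by vn_pollrecord() (the
 * vn_pollevent() call is assumed here to be the notification side):
 *
 *	{ &vop_poll_desc,	(vop_t *) vop_stdpoll },
 *	...
 *	vn_pollevent(vp, POLLIN | POLLRDNORM);
 */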

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

/* Create the VM system backing object for this vnode */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	GIANT_REQUIRED;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		VM_OBJECT_LOCK(object);
		object->ref_count--;
		VM_OBJECT_UNLOCK(object);
		vrele(vp);
	} else {
		VM_OBJECT_LOCK(object);
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, td);
			msleep(object, VM_OBJECT_MTX(object), PDROP | PVM,
			    "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
		VM_OBJECT_UNLOCK(object);
	}

	KASSERT(vp->v_object != NULL, ("vop_stdcreatevobject: NULL object"));
	vp->v_vflag |= VV_OBJBUF;

retn:
	return (error);
}

/* Destroy the VM system object associated with this vnode */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	if (obj == NULL)
		return (0);
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special case that terminates the object.
		 *
		 * Don't double-terminate the object.
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_UNLOCK(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects are allocated
 * from type-stable storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *a_vp;
		struct vm_object **a_objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}

/* XXX Needs good comment and VOP_BMAP(9) manpage */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
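
/*
 * Worked example (illustrative): with an f_iosize of 8192 and a
 * DEV_BSIZE of 512, btodb(8192) == 16, so the identity mapping above
 * turns logical block 5 into device block 5 * 16 == 80, and reports no
 * read-ahead (a_runp) or read-behind (a_runb) window.
 */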

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct buf *nbp;
	int s, error = 0;
	int maxretry = 100;     /* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	s = splbio();
	TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}
	splx(s);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop2:
	s = splbio();
	for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) {
		nbp = TAILQ_NEXT(bp, b_vnbufs);
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_vflag & VV_OBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
			splx(s);
		} else {
			bremfree(bp);
			splx(s);
			bawrite(bp);
		}
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		while (vp->v_numoutput) {
			vp->v_iflag |= VI_BWAIT;
			msleep((caddr_t)&vp->v_numoutput, VI_MTX(vp),
			    PRIBIO + 1, "fsync", 0);
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0) {
				splx(s);
				goto loop1;
			}
			vprint("fsync: giving up on dirty", vp);
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	splx(s);

	return (error);
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage));
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals));
}

/*
 * VFS default ops, used to fill the VFS function table with reasonable
 * default return values.
 */
int
vfs_stdroot(mp, vpp, td)
	struct mount *mp;
	struct vnode **vpp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs(mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdvptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstart(mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{

	return (0);
}

int
vfs_stdquotactl(mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	struct vnode *vp, *nvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, nvp) {
		VI_LOCK(vp);
		if (TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);

		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		error = VOP_FSYNC(vp, cred, waitfor, td);
		if (error)
			allerror = error;

		VOP_UNLOCK(vp, 0, td);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync(mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{

	return (0);
}

int
vfs_stdvget(mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit(vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit(vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}
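
/*
 * Illustrative sketch (hypothetical myfs, not part of this file): a
 * filesystem's vfsops table typically mixes its own methods with these
 * defaults (C99 designated initializers are assumed here for clarity):
 *
 *	static struct vfsops myfs_vfsops = {
 *		.vfs_mount =	myfs_mount,
 *		.vfs_unmount =	myfs_unmount,
 *		.vfs_root =	myfs_root,
 *		.vfs_statfs =	myfs_statfs,
 *		.vfs_sync =	vfs_stdsync,
 *		.vfs_vget =	vfs_stdvget,
 *		.vfs_fhtovp =	vfs_stdfhtovp,
 *		.vfs_init =	vfs_stdinit,
 *		.vfs_uninit =	vfs_stduninit,
 *	};
 */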

/* end of vfs default ops */