xref: /freebsd/sys/kern/vfs_default.c (revision 0ee0dbfb0d26cf4bc37f24f12e76c7f532b0f368)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed
8  * to Berkeley by John Heidemann of the UCLA Ficus project.
9  *
10  * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/bio.h>
43 #include <sys/buf.h>
44 #include <sys/conf.h>
45 #include <sys/event.h>
46 #include <sys/filio.h>
47 #include <sys/kernel.h>
48 #include <sys/limits.h>
49 #include <sys/lock.h>
50 #include <sys/lockf.h>
51 #include <sys/malloc.h>
52 #include <sys/mount.h>
53 #include <sys/namei.h>
54 #include <sys/rwlock.h>
55 #include <sys/fcntl.h>
56 #include <sys/unistd.h>
57 #include <sys/vnode.h>
58 #include <sys/dirent.h>
59 #include <sys/poll.h>
60 #include <sys/stat.h>
61 #include <security/audit/audit.h>
62 #include <sys/priv.h>
63 
64 #include <security/mac/mac_framework.h>
65 
66 #include <vm/vm.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_extern.h>
69 #include <vm/pmap.h>
70 #include <vm/vm_map.h>
71 #include <vm/vm_page.h>
72 #include <vm/vm_pager.h>
73 #include <vm/vnode_pager.h>
74 
75 static int	vop_nolookup(struct vop_lookup_args *);
76 static int	vop_norename(struct vop_rename_args *);
77 static int	vop_nostrategy(struct vop_strategy_args *);
78 static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
79 				char *dirbuf, int dirbuflen, off_t *off,
80 				char **cpos, int *len, int *eofflag,
81 				struct thread *td);
82 static int	dirent_exists(struct vnode *vp, const char *dirname,
83 			      struct thread *td);
84 
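/*
 * The smallest dirent a VOP_READDIR() can legitimately return: the fixed
 * header of struct dirent plus four bytes of name storage.  Used below to
 * reject malformed entries whose d_reclen is impossibly small.
 */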
85 #define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)
86 
87 static int vop_stdis_text(struct vop_is_text_args *ap);
88 static int vop_stdunset_text(struct vop_unset_text_args *ap);
89 static int vop_stdadd_writecount(struct vop_add_writecount_args *ap);
90 static int vop_stdcopy_file_range(struct vop_copy_file_range_args *ap);
91 static int vop_stdfdatasync(struct vop_fdatasync_args *ap);
92 static int vop_stdgetpages_async(struct vop_getpages_async_args *ap);
93 static int vop_stdread_pgcache(struct vop_read_pgcache_args *ap);
94 static int vop_stdstat(struct vop_stat_args *ap);
95 static int vop_stdvput_pair(struct vop_vput_pair_args *ap);
96 
97 /*
98  * This vnode table stores what we want to do if the filesystem doesn't
99  * implement a particular VOP.
100  *
101  * If there is no specific entry here, we will return EOPNOTSUPP.
102  *
103  * Note that every filesystem has to implement either vop_access
104  * or vop_accessx; failing to do so will result in an immediate crash
105  * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
106  * which calls vop_stdaccess() etc.
107  */
108 
109 struct vop_vector default_vnodeops = {
110 	.vop_default =		NULL,
111 	.vop_bypass =		VOP_EOPNOTSUPP,
112 
113 	.vop_access =		vop_stdaccess,
114 	.vop_accessx =		vop_stdaccessx,
115 	.vop_advise =		vop_stdadvise,
116 	.vop_advlock =		vop_stdadvlock,
117 	.vop_advlockasync =	vop_stdadvlockasync,
118 	.vop_advlockpurge =	vop_stdadvlockpurge,
119 	.vop_allocate =		vop_stdallocate,
120 	.vop_bmap =		vop_stdbmap,
121 	.vop_close =		VOP_NULL,
122 	.vop_fsync =		VOP_NULL,
123 	.vop_stat =		vop_stdstat,
124 	.vop_fdatasync =	vop_stdfdatasync,
125 	.vop_getpages =		vop_stdgetpages,
126 	.vop_getpages_async =	vop_stdgetpages_async,
127 	.vop_getwritemount = 	vop_stdgetwritemount,
128 	.vop_inactive =		VOP_NULL,
129 	.vop_need_inactive =	vop_stdneed_inactive,
130 	.vop_ioctl =		vop_stdioctl,
131 	.vop_kqfilter =		vop_stdkqfilter,
132 	.vop_islocked =		vop_stdislocked,
133 	.vop_lock1 =		vop_stdlock,
134 	.vop_lookup =		vop_nolookup,
135 	.vop_open =		VOP_NULL,
136 	.vop_pathconf =		VOP_EINVAL,
137 	.vop_poll =		vop_nopoll,
138 	.vop_putpages =		vop_stdputpages,
139 	.vop_readlink =		VOP_EINVAL,
140 	.vop_read_pgcache =	vop_stdread_pgcache,
141 	.vop_rename =		vop_norename,
142 	.vop_revoke =		VOP_PANIC,
143 	.vop_strategy =		vop_nostrategy,
144 	.vop_unlock =		vop_stdunlock,
145 	.vop_vptocnp =		vop_stdvptocnp,
146 	.vop_vptofh =		vop_stdvptofh,
147 	.vop_unp_bind =		vop_stdunp_bind,
148 	.vop_unp_connect =	vop_stdunp_connect,
149 	.vop_unp_detach =	vop_stdunp_detach,
150 	.vop_is_text =		vop_stdis_text,
151 	.vop_set_text =		vop_stdset_text,
152 	.vop_unset_text =	vop_stdunset_text,
153 	.vop_add_writecount =	vop_stdadd_writecount,
154 	.vop_copy_file_range =	vop_stdcopy_file_range,
155 	.vop_vput_pair =	vop_stdvput_pair,
156 };
157 VFS_VOP_VECTOR_REGISTER(default_vnodeops);
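
/*
 * Filesystems normally inherit these defaults by chaining their own
 * vop_vector to this one rather than copying entries, roughly as in the
 * (hypothetical) sketch below; only the operations the filesystem actually
 * implements need explicit entries.
 *
 *	static struct vop_vector myfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	myfs_lookup,
 *	};
 *	VFS_VOP_VECTOR_REGISTER(myfs_vnodeops);
 */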
158 
159 /*
160  * Series of trivial placeholder VOP implementations that simply return
161  * a fixed status: an error code or, for vop_null(), success.
162  */
163 
164 int
165 vop_eopnotsupp(struct vop_generic_args *ap)
166 {
167 	/*
168 	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
169 	*/
170 
171 	return (EOPNOTSUPP);
172 }
173 
174 int
175 vop_ebadf(struct vop_generic_args *ap)
176 {
177 
178 	return (EBADF);
179 }
180 
181 int
182 vop_enotty(struct vop_generic_args *ap)
183 {
184 
185 	return (ENOTTY);
186 }
187 
188 int
189 vop_einval(struct vop_generic_args *ap)
190 {
191 
192 	return (EINVAL);
193 }
194 
195 int
196 vop_enoent(struct vop_generic_args *ap)
197 {
198 
199 	return (ENOENT);
200 }
201 
202 int
203 vop_eagain(struct vop_generic_args *ap)
204 {
205 
206 	return (EAGAIN);
207 }
208 
209 int
210 vop_null(struct vop_generic_args *ap)
211 {
212 
213 	return (0);
214 }
215 
216 /*
217  * Helper function to panic when a VOP that must never be called is reached.
218  */
219 int
220 vop_panic(struct vop_generic_args *ap)
221 {
222 
223 	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
224 }
225 
226 /*
227  * vop_std<something> and vop_no<something> are default functions for use by
228  * filesystems that need the "default reasonable" implementation for a
229  * particular operation.
230  *
231  * The documentation for the operations they implement, where it exists,
232  * can be found in the VOP_<SOMETHING>(9) manual page (all uppercase).
233  */
234 
235 /*
236  * Default vop for filesystems that do not support name lookup
237  */
238 static int
239 vop_nolookup(ap)
240 	struct vop_lookup_args /* {
241 		struct vnode *a_dvp;
242 		struct vnode **a_vpp;
243 		struct componentname *a_cnp;
244 	} */ *ap;
245 {
246 
247 	*ap->a_vpp = NULL;
248 	return (ENOTDIR);
249 }
250 
251 /*
252  * vop_norename:
253  *
254  * Handle unlock and reference counting for arguments of vop_rename
255  * for filesystems that do not implement rename operation.
256  */
257 static int
258 vop_norename(struct vop_rename_args *ap)
259 {
260 
261 	vop_rename_fail(ap);
262 	return (EOPNOTSUPP);
263 }
264 
265 /*
266  *	vop_nostrategy:
267  *
268  *	Strategy routine for VFS devices that have none.
269  *
270  *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
271  *	routine.  Typically this is done for a BIO_READ strategy call.
272  *	Typically B_INVAL is assumed to already be clear prior to a write
273  *	and should not be cleared manually unless you just made the buffer
274  *	invalid.  BIO_ERROR should be cleared either way.
275  */
276 
277 static int
278 vop_nostrategy (struct vop_strategy_args *ap)
279 {
280 	printf("No strategy for buffer at %p\n", ap->a_bp);
281 	vn_printf(ap->a_vp, "vnode ");
282 	ap->a_bp->b_ioflags |= BIO_ERROR;
283 	ap->a_bp->b_error = EOPNOTSUPP;
284 	bufdone(ap->a_bp);
285 	return (EOPNOTSUPP);
286 }
287 
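/*
 * Fetch the next directory entry from a locked directory vnode.  When the
 * caller's buffer is exhausted (*len == 0), refill it with a VOP_READDIR()
 * starting at *off; otherwise just advance *cpos/*len past the entry that
 * is returned in *dpp.  Returns ENOENT once the directory is exhausted and
 * EINVAL if an entry with a bogus d_reclen is encountered.
 */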
288 static int
289 get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
290 		int dirbuflen, off_t *off, char **cpos, int *len,
291 		int *eofflag, struct thread *td)
292 {
293 	int error, reclen;
294 	struct uio uio;
295 	struct iovec iov;
296 	struct dirent *dp;
297 
298 	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
299 	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));
300 
301 	if (*len == 0) {
302 		iov.iov_base = dirbuf;
303 		iov.iov_len = dirbuflen;
304 
305 		uio.uio_iov = &iov;
306 		uio.uio_iovcnt = 1;
307 		uio.uio_offset = *off;
308 		uio.uio_resid = dirbuflen;
309 		uio.uio_segflg = UIO_SYSSPACE;
310 		uio.uio_rw = UIO_READ;
311 		uio.uio_td = td;
312 
313 		*eofflag = 0;
314 
315 #ifdef MAC
316 		error = mac_vnode_check_readdir(td->td_ucred, vp);
317 		if (error == 0)
318 #endif
319 			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
320 			    NULL, NULL);
321 		if (error)
322 			return (error);
323 
324 		*off = uio.uio_offset;
325 
326 		*cpos = dirbuf;
327 		*len = (dirbuflen - uio.uio_resid);
328 
329 		if (*len == 0)
330 			return (ENOENT);
331 	}
332 
333 	dp = (struct dirent *)(*cpos);
334 	reclen = dp->d_reclen;
335 	*dpp = dp;
336 
337 	/* Check for a malformed directory entry. */
338 	if (reclen < DIRENT_MINSIZE)
339 		return (EINVAL);
340 
341 	*cpos += reclen;
342 	*len -= reclen;
343 
344 	return (0);
345 }
346 
347 /*
348  * Check if a named file exists in a given directory vnode.
349  */
350 static int
351 dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
352 {
353 	char *dirbuf, *cpos;
354 	int error, eofflag, dirbuflen, len, found;
355 	off_t off;
356 	struct dirent *dp;
357 	struct vattr va;
358 
359 	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
360 	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));
361 
362 	found = 0;
363 
364 	error = VOP_GETATTR(vp, &va, td->td_ucred);
365 	if (error)
366 		return (found);
367 
368 	dirbuflen = DEV_BSIZE;
369 	if (dirbuflen < va.va_blocksize)
370 		dirbuflen = va.va_blocksize;
371 	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);
372 
373 	off = 0;
374 	len = 0;
375 	do {
376 		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
377 					&cpos, &len, &eofflag, td);
378 		if (error)
379 			goto out;
380 
381 		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
382 		    strcmp(dp->d_name, dirname) == 0) {
383 			found = 1;
384 			goto out;
385 		}
386 	} while (len > 0 || !eofflag);
387 
388 out:
389 	free(dirbuf, M_TEMP);
390 	return (found);
391 }
392 
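/*
 * vop_stdaccess() and vop_stdaccessx() bounce the request to the other
 * interface, so a filesystem only has to implement one of the two; see the
 * stack-overflow warning above default_vnodeops.  vfs_unixify_accmode()
 * reduces the richer NFSv4-style accmode to the classic bits before the
 * VOP_ACCESS() call is made.
 */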
393 int
394 vop_stdaccess(struct vop_access_args *ap)
395 {
396 
397 	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
398 	    VAPPEND)) == 0, ("invalid bit in accmode"));
399 
400 	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
401 }
402 
403 int
404 vop_stdaccessx(struct vop_accessx_args *ap)
405 {
406 	int error;
407 	accmode_t accmode = ap->a_accmode;
408 
409 	error = vfs_unixify_accmode(&accmode);
410 	if (error != 0)
411 		return (error);
412 
413 	if (accmode == 0)
414 		return (0);
415 
416 	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
417 }
418 
419 /*
420  * Advisory record locking support
421  */
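/*
 * lf_advlock() operates on absolute byte ranges; the VOP_GETATTR() calls
 * below exist only to learn the current file size so that SEEK_END-relative
 * lock requests can be resolved.
 */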
422 int
423 vop_stdadvlock(struct vop_advlock_args *ap)
424 {
425 	struct vnode *vp;
426 	struct mount *mp;
427 	struct vattr vattr;
428 	int error;
429 
430 	vp = ap->a_vp;
431 
432 	/*
433 	 * Provide atomicity of open(O_CREAT | O_EXCL | O_EXLOCK) for
434 	 * local filesystems.  See vn_open_cred() for the reciprocal part.
435 	 */
436 	mp = vp->v_mount;
437 	if (mp != NULL && (mp->mnt_flag & MNT_LOCAL) != 0 &&
438 	    ap->a_op == F_SETLK && (ap->a_flags & F_FIRSTOPEN) == 0) {
439 		VI_LOCK(vp);
440 		while ((vp->v_iflag & VI_FOPENING) != 0)
441 			msleep(vp, VI_MTX(vp), PLOCK, "lockfo", 0);
442 		VI_UNLOCK(vp);
443 	}
444 
445 	if (ap->a_fl->l_whence == SEEK_END) {
446 		/*
447 		 * The NFSv4 server must avoid doing a vn_lock() here, since it
448 		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
449 		 * the NFSv4 server always uses SEEK_SET and this code is
450 		 * only required for the SEEK_END case.
451 		 */
452 		vn_lock(vp, LK_SHARED | LK_RETRY);
453 		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
454 		VOP_UNLOCK(vp);
455 		if (error)
456 			return (error);
457 	} else
458 		vattr.va_size = 0;
459 
460 	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
461 }
462 
463 int
464 vop_stdadvlockasync(struct vop_advlockasync_args *ap)
465 {
466 	struct vnode *vp;
467 	struct vattr vattr;
468 	int error;
469 
470 	vp = ap->a_vp;
471 	if (ap->a_fl->l_whence == SEEK_END) {
472 		/* The size argument is only needed for SEEK_END. */
473 		vn_lock(vp, LK_SHARED | LK_RETRY);
474 		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
475 		VOP_UNLOCK(vp);
476 		if (error)
477 			return (error);
478 	} else
479 		vattr.va_size = 0;
480 
481 	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
482 }
483 
484 int
485 vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
486 {
487 	struct vnode *vp;
488 
489 	vp = ap->a_vp;
490 	lf_purgelocks(vp, &vp->v_lockf);
491 	return (0);
492 }
493 
494 /*
495  * vop_stdpathconf:
496  *
497  * Standard implementation of POSIX pathconf, to get information about limits
498  * for a filesystem.
499  * Override per filesystem for the case where the filesystem has smaller
500  * limits.
501  */
502 int
503 vop_stdpathconf(ap)
504 	struct vop_pathconf_args /* {
505 	struct vnode *a_vp;
506 	int a_name;
507 	int *a_retval;
508 	} */ *ap;
509 {
510 
511 	switch (ap->a_name) {
512 		case _PC_ASYNC_IO:
513 			*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
514 			return (0);
515 		case _PC_PATH_MAX:
516 			*ap->a_retval = PATH_MAX;
517 			return (0);
518 		case _PC_ACL_EXTENDED:
519 		case _PC_ACL_NFS4:
520 		case _PC_CAP_PRESENT:
521 		case _PC_INF_PRESENT:
522 		case _PC_MAC_PRESENT:
523 			*ap->a_retval = 0;
524 			return (0);
525 		default:
526 			return (EINVAL);
527 	}
528 	/* NOTREACHED */
529 }
530 
531 /*
532  * Standard lock, unlock and islocked functions.
533  */
534 int
535 vop_stdlock(ap)
536 	struct vop_lock1_args /* {
537 		struct vnode *a_vp;
538 		int a_flags;
539 		char *file;
540 		int line;
541 	} */ *ap;
542 {
543 	struct vnode *vp = ap->a_vp;
544 	struct mtx *ilk;
545 
546 	ilk = VI_MTX(vp);
547 	return (lockmgr_lock_flags(vp->v_vnlock, ap->a_flags,
548 	    &ilk->lock_object, ap->a_file, ap->a_line));
549 }
550 
551 /* See above. */
552 int
553 vop_stdunlock(ap)
554 	struct vop_unlock_args /* {
555 		struct vnode *a_vp;
556 	} */ *ap;
557 {
558 	struct vnode *vp = ap->a_vp;
559 
560 	return (lockmgr_unlock(vp->v_vnlock));
561 }
562 
563 /* See above. */
564 int
565 vop_stdislocked(ap)
566 	struct vop_islocked_args /* {
567 		struct vnode *a_vp;
568 	} */ *ap;
569 {
570 
571 	return (lockstatus(ap->a_vp->v_vnlock));
572 }
573 
574 /*
575  * Variants of the above set.
576  *
577  * Differences are:
578  * - shared locking disablement is not supported
579  * - v_vnlock pointer is not honored
580  */
581 int
582 vop_lock(ap)
583 	struct vop_lock1_args /* {
584 		struct vnode *a_vp;
585 		int a_flags;
586 		char *file;
587 		int line;
588 	} */ *ap;
589 {
590 	struct vnode *vp = ap->a_vp;
591 	int flags = ap->a_flags;
592 	struct mtx *ilk;
593 
594 	MPASS(vp->v_vnlock == &vp->v_lock);
595 
596 	if (__predict_false((flags & ~(LK_TYPE_MASK | LK_NODDLKTREAT | LK_RETRY)) != 0))
597 		goto other;
598 
599 	switch (flags & LK_TYPE_MASK) {
600 	case LK_SHARED:
601 		return (lockmgr_slock(&vp->v_lock, flags, ap->a_file, ap->a_line));
602 	case LK_EXCLUSIVE:
603 		return (lockmgr_xlock(&vp->v_lock, flags, ap->a_file, ap->a_line));
604 	}
605 other:
606 	ilk = VI_MTX(vp);
607 	return (lockmgr_lock_flags(&vp->v_lock, flags,
608 	    &ilk->lock_object, ap->a_file, ap->a_line));
609 }
610 
611 int
612 vop_unlock(ap)
613 	struct vop_unlock_args /* {
614 		struct vnode *a_vp;
615 	} */ *ap;
616 {
617 	struct vnode *vp = ap->a_vp;
618 
619 	MPASS(vp->v_vnlock == &vp->v_lock);
620 
621 	return (lockmgr_unlock(&vp->v_lock));
622 }
623 
624 int
625 vop_islocked(ap)
626 	struct vop_islocked_args /* {
627 		struct vnode *a_vp;
628 	} */ *ap;
629 {
630 	struct vnode *vp = ap->a_vp;
631 
632 	MPASS(vp->v_vnlock == &vp->v_lock);
633 
634 	return (lockstatus(&vp->v_lock));
635 }
636 
637 /*
638  * Report the standard poll events as always ready; reject anything else.
639  */
640 int
641 vop_nopoll(ap)
642 	struct vop_poll_args /* {
643 		struct vnode *a_vp;
644 		int  a_events;
645 		struct ucred *a_cred;
646 		struct thread *a_td;
647 	} */ *ap;
648 {
649 
650 	if (ap->a_events & ~POLLSTANDARD)
651 		return (POLLNVAL);
652 	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
653 }
654 
655 /*
656  * Implement poll for local filesystems that support it.
657  */
658 int
659 vop_stdpoll(ap)
660 	struct vop_poll_args /* {
661 		struct vnode *a_vp;
662 		int  a_events;
663 		struct ucred *a_cred;
664 		struct thread *a_td;
665 	} */ *ap;
666 {
667 	if (ap->a_events & ~POLLSTANDARD)
668 		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
669 	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
670 }
671 
672 /*
673  * Return our mount point, as we will take charge of the writes.
674  */
675 int
676 vop_stdgetwritemount(ap)
677 	struct vop_getwritemount_args /* {
678 		struct vnode *a_vp;
679 		struct mount **a_mpp;
680 	} */ *ap;
681 {
682 	struct mount *mp;
683 	struct mount_pcpu *mpcpu;
684 	struct vnode *vp;
685 
686 	/*
687 	 * Note that having a reference does not prevent forced unmount from
688 	 * setting ->v_mount to NULL after the lock gets released. This is of
689 	 * no consequence for typical consumers (most notably vn_start_write)
690 	 * since in this case the vnode is VIRF_DOOMED. Unmount might have
691 	 * progressed far enough that its completion is only delayed by the
692 	 * reference obtained here. The consumer only needs to concern itself
693 	 * with releasing it.
694 	 */
695 	vp = ap->a_vp;
696 	mp = vp->v_mount;
697 	if (mp == NULL) {
698 		*(ap->a_mpp) = NULL;
699 		return (0);
700 	}
701 	if (vfs_op_thread_enter(mp, mpcpu)) {
702 		if (mp == vp->v_mount) {
703 			vfs_mp_count_add_pcpu(mpcpu, ref, 1);
704 			vfs_op_thread_exit(mp, mpcpu);
705 		} else {
706 			vfs_op_thread_exit(mp, mpcpu);
707 			mp = NULL;
708 		}
709 	} else {
710 		MNT_ILOCK(mp);
711 		if (mp == vp->v_mount) {
712 			MNT_REF(mp);
713 			MNT_IUNLOCK(mp);
714 		} else {
715 			MNT_IUNLOCK(mp);
716 			mp = NULL;
717 		}
718 	}
719 	*(ap->a_mpp) = mp;
720 	return (0);
721 }
722 
723 /*
724  * If the file system doesn't implement VOP_BMAP, then return sensible defaults:
725  * - Return the vnode's bufobj instead of any underlying device's bufobj
726  * - Calculate the physical block number as if there were equal size
727  *   consecutive blocks, but
728  * - Report no contiguous runs of blocks.
729  */
730 int
731 vop_stdbmap(ap)
732 	struct vop_bmap_args /* {
733 		struct vnode *a_vp;
734 		daddr_t  a_bn;
735 		struct bufobj **a_bop;
736 		daddr_t *a_bnp;
737 		int *a_runp;
738 		int *a_runb;
739 	} */ *ap;
740 {
741 
742 	if (ap->a_bop != NULL)
743 		*ap->a_bop = &ap->a_vp->v_bufobj;
744 	if (ap->a_bnp != NULL)
745 		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
746 	if (ap->a_runp != NULL)
747 		*ap->a_runp = 0;
748 	if (ap->a_runb != NULL)
749 		*ap->a_runb = 0;
750 	return (0);
751 }
752 
753 int
754 vop_stdfsync(ap)
755 	struct vop_fsync_args /* {
756 		struct vnode *a_vp;
757 		int a_waitfor;
758 		struct thread *a_td;
759 	} */ *ap;
760 {
761 
762 	return (vn_fsync_buf(ap->a_vp, ap->a_waitfor));
763 }
764 
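/*
 * Default fdatasync: without filesystem support for syncing just the data,
 * fall back to a full VOP_FSYNC() with MNT_WAIT.
 */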
765 static int
766 vop_stdfdatasync(struct vop_fdatasync_args *ap)
767 {
768 
769 	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
770 }
771 
772 int
773 vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
774 {
775 
776 	return (vn_fsync_buf(ap->a_vp, MNT_WAIT));
777 }
778 
779 /* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
780 int
781 vop_stdgetpages(ap)
782 	struct vop_getpages_args /* {
783 		struct vnode *a_vp;
784 		vm_page_t *a_m;
785 		int a_count;
786 		int *a_rbehind;
787 		int *a_rahead;
788 	} */ *ap;
789 {
790 
791 	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
792 	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL);
793 }
794 
795 static int
796 vop_stdgetpages_async(struct vop_getpages_async_args *ap)
797 {
798 	int error;
799 
800 	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
801 	    ap->a_rahead);
802 	if (ap->a_iodone != NULL)
803 		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
804 	return (error);
805 }
806 
807 int
808 vop_stdkqfilter(struct vop_kqfilter_args *ap)
809 {
810 	return vfs_kqfilter(ap);
811 }
812 
813 /* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
814 int
815 vop_stdputpages(ap)
816 	struct vop_putpages_args /* {
817 		struct vnode *a_vp;
818 		vm_page_t *a_m;
819 		int a_count;
820 		int a_sync;
821 		int *a_rtvals;
822 	} */ *ap;
823 {
824 
825 	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
826 	     ap->a_sync, ap->a_rtvals);
827 }
828 
829 int
830 vop_stdvptofh(struct vop_vptofh_args *ap)
831 {
832 	return (EOPNOTSUPP);
833 }
834 
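/*
 * Default vnode-to-component-name translation: open ".." relative to the
 * directory vnode, scan the parent with VOP_READDIR() for an entry whose
 * d_fileno matches our va_fileid, and copy that name into the tail of the
 * caller-supplied buffer (the new *buflen is the offset of the name).  For
 * a union mount the search is redirected to the covered directory, and a
 * candidate name is rejected if the mounted filesystem shadows it with an
 * entry of its own.
 */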
835 int
836 vop_stdvptocnp(struct vop_vptocnp_args *ap)
837 {
838 	struct vnode *vp = ap->a_vp;
839 	struct vnode **dvp = ap->a_vpp;
840 	struct ucred *cred;
841 	char *buf = ap->a_buf;
842 	size_t *buflen = ap->a_buflen;
843 	char *dirbuf, *cpos;
844 	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
845 	off_t off;
846 	ino_t fileno;
847 	struct vattr va;
848 	struct nameidata nd;
849 	struct thread *td;
850 	struct dirent *dp;
851 	struct vnode *mvp;
852 
853 	i = *buflen;
854 	error = 0;
855 	covered = 0;
856 	td = curthread;
857 	cred = td->td_ucred;
858 
859 	if (vp->v_type != VDIR)
860 		return (ENOENT);
861 
862 	error = VOP_GETATTR(vp, &va, cred);
863 	if (error)
864 		return (error);
865 
866 	VREF(vp);
867 	locked = VOP_ISLOCKED(vp);
868 	VOP_UNLOCK(vp);
869 	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
870 	    "..", vp, td);
871 	flags = FREAD;
872 	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
873 	if (error) {
874 		vn_lock(vp, locked | LK_RETRY);
875 		return (error);
876 	}
877 	NDFREE(&nd, NDF_ONLY_PNBUF);
878 
879 	mvp = *dvp = nd.ni_vp;
880 
881 	if (vp->v_mount != (*dvp)->v_mount &&
882 	    ((*dvp)->v_vflag & VV_ROOT) &&
883 	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
884 		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
885 		VREF(mvp);
886 		VOP_UNLOCK(mvp);
887 		vn_close(mvp, FREAD, cred, td);
888 		VREF(*dvp);
889 		vn_lock(*dvp, LK_SHARED | LK_RETRY);
890 		covered = 1;
891 	}
892 
893 	fileno = va.va_fileid;
894 
895 	dirbuflen = DEV_BSIZE;
896 	if (dirbuflen < va.va_blocksize)
897 		dirbuflen = va.va_blocksize;
898 	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);
899 
900 	if ((*dvp)->v_type != VDIR) {
901 		error = ENOENT;
902 		goto out;
903 	}
904 
905 	off = 0;
906 	len = 0;
907 	do {
908 		/* call VOP_READDIR of parent */
909 		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
910 					&cpos, &len, &eofflag, td);
911 		if (error)
912 			goto out;
913 
914 		if ((dp->d_type != DT_WHT) &&
915 		    (dp->d_fileno == fileno)) {
916 			if (covered) {
917 				VOP_UNLOCK(*dvp);
918 				vn_lock(mvp, LK_SHARED | LK_RETRY);
919 				if (dirent_exists(mvp, dp->d_name, td)) {
920 					error = ENOENT;
921 					VOP_UNLOCK(mvp);
922 					vn_lock(*dvp, LK_SHARED | LK_RETRY);
923 					goto out;
924 				}
925 				VOP_UNLOCK(mvp);
926 				vn_lock(*dvp, LK_SHARED | LK_RETRY);
927 			}
928 			i -= dp->d_namlen;
929 
930 			if (i < 0) {
931 				error = ENOMEM;
932 				goto out;
933 			}
934 			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
935 				error = ENOENT;
936 			} else {
937 				bcopy(dp->d_name, buf + i, dp->d_namlen);
938 				error = 0;
939 			}
940 			goto out;
941 		}
942 	} while (len > 0 || !eofflag);
943 	error = ENOENT;
944 
945 out:
946 	free(dirbuf, M_TEMP);
947 	if (!error) {
948 		*buflen = i;
949 		vref(*dvp);
950 	}
951 	if (covered) {
952 		vput(*dvp);
953 		vrele(mvp);
954 	} else {
955 		VOP_UNLOCK(mvp);
956 		vn_close(mvp, FREAD, cred, td);
957 	}
958 	vn_lock(vp, locked | LK_RETRY);
959 	return (error);
960 }
961 
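/*
 * Default posix_fallocate(): lacking a cheaper filesystem primitive, force
 * the backing blocks to exist by reading the existing contents (or zeroes
 * beyond EOF) and writing them straight back, one va_blocksize-sized chunk
 * at a time.  *a_offset and *a_len are updated as the copy progresses so
 * the caller can resume after the should_yield() break.
 */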
962 int
963 vop_stdallocate(struct vop_allocate_args *ap)
964 {
965 #ifdef __notyet__
966 	struct statfs *sfs;
967 	off_t maxfilesize = 0;
968 #endif
969 	struct iovec aiov;
970 	struct vattr vattr, *vap;
971 	struct uio auio;
972 	off_t fsize, len, cur, offset;
973 	uint8_t *buf;
974 	struct thread *td;
975 	struct vnode *vp;
976 	size_t iosize;
977 	int error;
978 
979 	buf = NULL;
980 	error = 0;
981 	td = curthread;
982 	vap = &vattr;
983 	vp = ap->a_vp;
984 	len = *ap->a_len;
985 	offset = *ap->a_offset;
986 
987 	error = VOP_GETATTR(vp, vap, td->td_ucred);
988 	if (error != 0)
989 		goto out;
990 	fsize = vap->va_size;
991 	iosize = vap->va_blocksize;
992 	if (iosize == 0)
993 		iosize = BLKDEV_IOSIZE;
994 	if (iosize > maxphys)
995 		iosize = maxphys;
996 	buf = malloc(iosize, M_TEMP, M_WAITOK);
997 
998 #ifdef __notyet__
999 	/*
1000 	 * Check if the filesystem sets f_maxfilesize; if not use
1001 	 * VOP_SETATTR to perform the check.
1002 	 */
1003 	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
1004 	error = VFS_STATFS(vp->v_mount, sfs, td);
1005 	if (error == 0)
1006 		maxfilesize = sfs->f_maxfilesize;
1007 	free(sfs, M_STATFS);
1008 	if (error != 0)
1009 		goto out;
1010 	if (maxfilesize) {
1011 		if (offset > maxfilesize || len > maxfilesize ||
1012 		    offset + len > maxfilesize) {
1013 			error = EFBIG;
1014 			goto out;
1015 		}
1016 	} else
1017 #endif
1018 	if (offset + len > vap->va_size) {
1019 		/*
1020 		 * Test offset + len against the filesystem's maxfilesize.
1021 		 */
1022 		VATTR_NULL(vap);
1023 		vap->va_size = offset + len;
1024 		error = VOP_SETATTR(vp, vap, td->td_ucred);
1025 		if (error != 0)
1026 			goto out;
1027 		VATTR_NULL(vap);
1028 		vap->va_size = fsize;
1029 		error = VOP_SETATTR(vp, vap, td->td_ucred);
1030 		if (error != 0)
1031 			goto out;
1032 	}
1033 
1034 	for (;;) {
1035 		/*
1036 		 * Read and write back anything below the nominal file
1037 		 * size.  There's currently no way outside the filesystem
1038 		 * to know whether this area is sparse or not.
1039 		 */
1040 		cur = iosize;
1041 		if ((offset % iosize) != 0)
1042 			cur -= (offset % iosize);
1043 		if (cur > len)
1044 			cur = len;
1045 		if (offset < fsize) {
1046 			aiov.iov_base = buf;
1047 			aiov.iov_len = cur;
1048 			auio.uio_iov = &aiov;
1049 			auio.uio_iovcnt = 1;
1050 			auio.uio_offset = offset;
1051 			auio.uio_resid = cur;
1052 			auio.uio_segflg = UIO_SYSSPACE;
1053 			auio.uio_rw = UIO_READ;
1054 			auio.uio_td = td;
1055 			error = VOP_READ(vp, &auio, 0, td->td_ucred);
1056 			if (error != 0)
1057 				break;
1058 			if (auio.uio_resid > 0) {
1059 				bzero(buf + cur - auio.uio_resid,
1060 				    auio.uio_resid);
1061 			}
1062 		} else {
1063 			bzero(buf, cur);
1064 		}
1065 
1066 		aiov.iov_base = buf;
1067 		aiov.iov_len = cur;
1068 		auio.uio_iov = &aiov;
1069 		auio.uio_iovcnt = 1;
1070 		auio.uio_offset = offset;
1071 		auio.uio_resid = cur;
1072 		auio.uio_segflg = UIO_SYSSPACE;
1073 		auio.uio_rw = UIO_WRITE;
1074 		auio.uio_td = td;
1075 
1076 		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
1077 		if (error != 0)
1078 			break;
1079 
1080 		len -= cur;
1081 		offset += cur;
1082 		if (len == 0)
1083 			break;
1084 		if (should_yield())
1085 			break;
1086 	}
1087 
1088  out:
1089 	*ap->a_len = len;
1090 	*ap->a_offset = offset;
1091 	free(buf, M_TEMP);
1092 	return (error);
1093 }
1094 
1095 int
1096 vop_stdadvise(struct vop_advise_args *ap)
1097 {
1098 	struct vnode *vp;
1099 	struct bufobj *bo;
1100 	daddr_t startn, endn;
1101 	off_t bstart, bend, start, end;
1102 	int bsize, error;
1103 
1104 	vp = ap->a_vp;
1105 	switch (ap->a_advice) {
1106 	case POSIX_FADV_WILLNEED:
1107 		/*
1108 		 * Do nothing for now.  Filesystems should provide a
1109 		 * custom method which starts an asynchronous read of
1110 		 * the requested region.
1111 		 */
1112 		error = 0;
1113 		break;
1114 	case POSIX_FADV_DONTNEED:
1115 		error = 0;
1116 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1117 		if (VN_IS_DOOMED(vp)) {
1118 			VOP_UNLOCK(vp);
1119 			break;
1120 		}
1121 
1122 		/*
1123 		 * Round to block boundaries (and later possibly further to
1124 		 * page boundaries).  Applications cannot reasonably be aware
1125 		 * of the boundaries, so the range is expanded at both ends to
1126 		 * cover whole blocks.  It still doesn't cover
1127 		 * read-ahead.  For partial blocks, this gives unnecessary
1128 		 * discarding of buffers but is efficient enough since the
1129 		 * pages usually remain in VMIO for some time.
1130 		 */
1131 		bsize = vp->v_bufobj.bo_bsize;
1132 		bstart = rounddown(ap->a_start, bsize);
1133 		bend = roundup(ap->a_end, bsize);
1134 
1135 		/*
1136 		 * Deactivate pages in the specified range from the backing VM
1137 		 * object.  Pages that are resident in the buffer cache will
1138 		 * remain wired until their corresponding buffers are released
1139 		 * below.
1140 		 */
1141 		if (vp->v_object != NULL) {
1142 			start = trunc_page(bstart);
1143 			end = round_page(bend);
1144 			VM_OBJECT_RLOCK(vp->v_object);
1145 			vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
1146 			    OFF_TO_IDX(end));
1147 			VM_OBJECT_RUNLOCK(vp->v_object);
1148 		}
1149 
1150 		bo = &vp->v_bufobj;
1151 		BO_RLOCK(bo);
1152 		startn = bstart / bsize;
1153 		endn = bend / bsize;
1154 		error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
1155 		if (error == 0)
1156 			error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
1157 		BO_RUNLOCK(bo);
1158 		VOP_UNLOCK(vp);
1159 		break;
1160 	default:
1161 		error = EINVAL;
1162 		break;
1163 	}
1164 	return (error);
1165 }
1166 
1167 int
1168 vop_stdunp_bind(struct vop_unp_bind_args *ap)
1169 {
1170 
1171 	ap->a_vp->v_unpcb = ap->a_unpcb;
1172 	return (0);
1173 }
1174 
1175 int
1176 vop_stdunp_connect(struct vop_unp_connect_args *ap)
1177 {
1178 
1179 	*ap->a_unpcb = ap->a_vp->v_unpcb;
1180 	return (0);
1181 }
1182 
1183 int
1184 vop_stdunp_detach(struct vop_unp_detach_args *ap)
1185 {
1186 
1187 	ap->a_vp->v_unpcb = NULL;
1188 	return (0);
1189 }
1190 
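/*
 * v_writecount does double duty: positive values count writers, while
 * negative values count executable (text) references.  The routines below
 * implement that convention, which is what makes ETXTBSY work: a mapped
 * text vnode cannot gain writers and a written-to vnode cannot become text.
 */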
1191 static int
1192 vop_stdis_text(struct vop_is_text_args *ap)
1193 {
1194 
1195 	return (ap->a_vp->v_writecount < 0);
1196 }
1197 
1198 int
1199 vop_stdset_text(struct vop_set_text_args *ap)
1200 {
1201 	struct vnode *vp;
1202 	struct mount *mp;
1203 	int error;
1204 
1205 	vp = ap->a_vp;
1206 	VI_LOCK(vp);
1207 	if (vp->v_writecount > 0) {
1208 		error = ETXTBSY;
1209 	} else {
1210 		/*
1211 		 * If requested by fs, keep a use reference to the
1212 		 * vnode until the last text reference is released.
1213 		 */
1214 		mp = vp->v_mount;
1215 		if (mp != NULL && (mp->mnt_kern_flag & MNTK_TEXT_REFS) != 0 &&
1216 		    vp->v_writecount == 0) {
1217 			VNPASS((vp->v_iflag & VI_TEXT_REF) == 0, vp);
1218 			vp->v_iflag |= VI_TEXT_REF;
1219 			vrefl(vp);
1220 		}
1221 
1222 		vp->v_writecount--;
1223 		error = 0;
1224 	}
1225 	VI_UNLOCK(vp);
1226 	return (error);
1227 }
1228 
1229 static int
1230 vop_stdunset_text(struct vop_unset_text_args *ap)
1231 {
1232 	struct vnode *vp;
1233 	int error;
1234 	bool last;
1235 
1236 	vp = ap->a_vp;
1237 	last = false;
1238 	VI_LOCK(vp);
1239 	if (vp->v_writecount < 0) {
1240 		if ((vp->v_iflag & VI_TEXT_REF) != 0 &&
1241 		    vp->v_writecount == -1) {
1242 			last = true;
1243 			vp->v_iflag &= ~VI_TEXT_REF;
1244 		}
1245 		vp->v_writecount++;
1246 		error = 0;
1247 	} else {
1248 		error = EINVAL;
1249 	}
1250 	VI_UNLOCK(vp);
1251 	if (last)
1252 		vunref(vp);
1253 	return (error);
1254 }
1255 
1256 static int
1257 vop_stdadd_writecount(struct vop_add_writecount_args *ap)
1258 {
1259 	struct vnode *vp;
1260 	struct mount *mp;
1261 	int error;
1262 
1263 	vp = ap->a_vp;
1264 	VI_LOCK_FLAGS(vp, MTX_DUPOK);
1265 	if (vp->v_writecount < 0) {
1266 		error = ETXTBSY;
1267 	} else {
1268 		VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
1269 		    ("neg writecount increment %d", ap->a_inc));
1270 		if (vp->v_writecount == 0) {
1271 			mp = vp->v_mount;
1272 			if (mp != NULL && (mp->mnt_kern_flag & MNTK_NOMSYNC) == 0)
1273 				vlazy(vp);
1274 		}
1275 		vp->v_writecount += ap->a_inc;
1276 		error = 0;
1277 	}
1278 	VI_UNLOCK(vp);
1279 	return (error);
1280 }
1281 
1282 int
1283 vop_stdneed_inactive(struct vop_need_inactive_args *ap)
1284 {
1285 
1286 	return (1);
1287 }
1288 
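/*
 * Default ioctl: only FIOSEEKDATA/FIOSEEKHOLE are handled, by pretending
 * the whole file is one data region.  An in-range offset is left alone for
 * FIOSEEKDATA and moved to the implicit hole at EOF for FIOSEEKHOLE; an
 * out-of-range offset yields ENXIO.  Everything else is ENOTTY.
 */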
1289 int
1290 vop_stdioctl(struct vop_ioctl_args *ap)
1291 {
1292 	struct vnode *vp;
1293 	struct vattr va;
1294 	off_t *offp;
1295 	int error;
1296 
1297 	switch (ap->a_command) {
1298 	case FIOSEEKDATA:
1299 	case FIOSEEKHOLE:
1300 		vp = ap->a_vp;
1301 		error = vn_lock(vp, LK_SHARED);
1302 		if (error != 0)
1303 			return (EBADF);
1304 		if (vp->v_type == VREG)
1305 			error = VOP_GETATTR(vp, &va, ap->a_cred);
1306 		else
1307 			error = ENOTTY;
1308 		if (error == 0) {
1309 			offp = ap->a_data;
1310 			if (*offp < 0 || *offp >= va.va_size)
1311 				error = ENXIO;
1312 			else if (ap->a_command == FIOSEEKHOLE)
1313 				*offp = va.va_size;
1314 		}
1315 		VOP_UNLOCK(vp);
1316 		break;
1317 	default:
1318 		error = ENOTTY;
1319 		break;
1320 	}
1321 	return (error);
1322 }
1323 
1324 /*
1325  * VFS default ops, used to fill the VFS operations table with reasonable
1326  * default return values.
1327  */
1328 int
1329 vfs_stdroot (mp, flags, vpp)
1330 	struct mount *mp;
1331 	int flags;
1332 	struct vnode **vpp;
1333 {
1334 
1335 	return (EOPNOTSUPP);
1336 }
1337 
1338 int
1339 vfs_stdstatfs (mp, sbp)
1340 	struct mount *mp;
1341 	struct statfs *sbp;
1342 {
1343 
1344 	return (EOPNOTSUPP);
1345 }
1346 
1347 int
1348 vfs_stdquotactl (mp, cmds, uid, arg)
1349 	struct mount *mp;
1350 	int cmds;
1351 	uid_t uid;
1352 	void *arg;
1353 {
1354 
1355 	return (EOPNOTSUPP);
1356 }
1357 
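/*
 * Default VFS_SYNC(): walk every vnode on the mount and VOP_FSYNC() those
 * with dirty buffers.  If a vnode goes away before we can lock it (vget()
 * returning ENOENT), the iteration is restarted from scratch.
 */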
1358 int
1359 vfs_stdsync(mp, waitfor)
1360 	struct mount *mp;
1361 	int waitfor;
1362 {
1363 	struct vnode *vp, *mvp;
1364 	struct thread *td;
1365 	int error, lockreq, allerror = 0;
1366 
1367 	td = curthread;
1368 	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
1369 	if (waitfor != MNT_WAIT)
1370 		lockreq |= LK_NOWAIT;
1371 	/*
1372 	 * Force stale buffer cache information to be flushed.
1373 	 */
1374 loop:
1375 	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1376 		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
1377 			VI_UNLOCK(vp);
1378 			continue;
1379 		}
1380 		if ((error = vget(vp, lockreq)) != 0) {
1381 			if (error == ENOENT) {
1382 				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
1383 				goto loop;
1384 			}
1385 			continue;
1386 		}
1387 		error = VOP_FSYNC(vp, waitfor, td);
1388 		if (error)
1389 			allerror = error;
1390 		vput(vp);
1391 	}
1392 	return (allerror);
1393 }
1394 
1395 int
1396 vfs_stdnosync (mp, waitfor)
1397 	struct mount *mp;
1398 	int waitfor;
1399 {
1400 
1401 	return (0);
1402 }
1403 
1404 static int
1405 vop_stdcopy_file_range(struct vop_copy_file_range_args *ap)
1406 {
1407 	int error;
1408 
1409 	error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp,
1410 	    ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags, ap->a_incred,
1411 	    ap->a_outcred, ap->a_fsizetd);
1412 	return (error);
1413 }
1414 
1415 int
1416 vfs_stdvget (mp, ino, flags, vpp)
1417 	struct mount *mp;
1418 	ino_t ino;
1419 	int flags;
1420 	struct vnode **vpp;
1421 {
1422 
1423 	return (EOPNOTSUPP);
1424 }
1425 
1426 int
1427 vfs_stdfhtovp (mp, fhp, flags, vpp)
1428 	struct mount *mp;
1429 	struct fid *fhp;
1430 	int flags;
1431 	struct vnode **vpp;
1432 {
1433 
1434 	return (EOPNOTSUPP);
1435 }
1436 
1437 int
1438 vfs_stdinit (vfsp)
1439 	struct vfsconf *vfsp;
1440 {
1441 
1442 	return (0);
1443 }
1444 
1445 int
1446 vfs_stduninit (vfsp)
1447 	struct vfsconf *vfsp;
1448 {
1449 
1450 	return(0);
1451 }
1452 
1453 int
1454 vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
1455 	struct mount *mp;
1456 	int cmd;
1457 	struct vnode *filename_vp;
1458 	int attrnamespace;
1459 	const char *attrname;
1460 {
1461 
1462 	if (filename_vp != NULL)
1463 		VOP_UNLOCK(filename_vp);
1464 	return (EOPNOTSUPP);
1465 }
1466 
1467 int
1468 vfs_stdsysctl(mp, op, req)
1469 	struct mount *mp;
1470 	fsctlop_t op;
1471 	struct sysctl_req *req;
1472 {
1473 
1474 	return (EOPNOTSUPP);
1475 }
1476 
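/*
 * vop_sigdefer() lets a filesystem wrap its real VOP implementations (found
 * via the byte offset recorded in the vop descriptor) so that stop signals
 * are deferred for the duration of the call; sigallowstop() restores the
 * previous state afterwards.
 */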
1477 static vop_bypass_t *
1478 bp_by_off(struct vop_vector *vop, struct vop_generic_args *a)
1479 {
1480 
1481 	return (*(vop_bypass_t **)((char *)vop + a->a_desc->vdesc_vop_offset));
1482 }
1483 
1484 int
1485 vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a)
1486 {
1487 	vop_bypass_t *bp;
1488 	int prev_stops, rc;
1489 
1490 	bp = bp_by_off(vop, a);
1491 	MPASS(bp != NULL);
1492 
1493 	prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT);
1494 	rc = bp(a);
1495 	sigallowstop(prev_stops);
1496 	return (rc);
1497 }
1498 
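/*
 * Default VOP_STAT(): build struct stat from a VOP_GETATTR(), providing
 * conservative defaults for attributes (birth time, fsid, rdev) that older
 * filesystems may not report.
 */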
1499 static int
1500 vop_stdstat(struct vop_stat_args *a)
1501 {
1502 	struct vattr vattr;
1503 	struct vattr *vap;
1504 	struct vnode *vp;
1505 	struct stat *sb;
1506 	int error;
1507 	u_short mode;
1508 
1509 	vp = a->a_vp;
1510 	sb = a->a_sb;
1511 
1512 	error = vop_stat_helper_pre(a);
1513 	if (error != 0)
1514 		return (error);
1515 
1516 	vap = &vattr;
1517 
1518 	/*
1519 	 * Initialize defaults for new and unusual fields, so that file
1520 	 * systems which don't support these fields don't need to know
1521 	 * about them.
1522 	 */
1523 	vap->va_birthtime.tv_sec = -1;
1524 	vap->va_birthtime.tv_nsec = 0;
1525 	vap->va_fsid = VNOVAL;
1526 	vap->va_rdev = NODEV;
1527 
1528 	error = VOP_GETATTR(vp, vap, a->a_active_cred);
1529 	if (error)
1530 		goto out;
1531 
1532 	/*
1533 	 * Zero the spare stat fields
1534 	 */
1535 	bzero(sb, sizeof *sb);
1536 
1537 	/*
1538 	 * Copy from vattr table
1539 	 */
1540 	if (vap->va_fsid != VNOVAL)
1541 		sb->st_dev = vap->va_fsid;
1542 	else
1543 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
1544 	sb->st_ino = vap->va_fileid;
1545 	mode = vap->va_mode;
1546 	switch (vap->va_type) {
1547 	case VREG:
1548 		mode |= S_IFREG;
1549 		break;
1550 	case VDIR:
1551 		mode |= S_IFDIR;
1552 		break;
1553 	case VBLK:
1554 		mode |= S_IFBLK;
1555 		break;
1556 	case VCHR:
1557 		mode |= S_IFCHR;
1558 		break;
1559 	case VLNK:
1560 		mode |= S_IFLNK;
1561 		break;
1562 	case VSOCK:
1563 		mode |= S_IFSOCK;
1564 		break;
1565 	case VFIFO:
1566 		mode |= S_IFIFO;
1567 		break;
1568 	default:
1569 		error = EBADF;
1570 		goto out;
1571 	}
1572 	sb->st_mode = mode;
1573 	sb->st_nlink = vap->va_nlink;
1574 	sb->st_uid = vap->va_uid;
1575 	sb->st_gid = vap->va_gid;
1576 	sb->st_rdev = vap->va_rdev;
1577 	if (vap->va_size > OFF_MAX) {
1578 		error = EOVERFLOW;
1579 		goto out;
1580 	}
1581 	sb->st_size = vap->va_size;
1582 	sb->st_atim.tv_sec = vap->va_atime.tv_sec;
1583 	sb->st_atim.tv_nsec = vap->va_atime.tv_nsec;
1584 	sb->st_mtim.tv_sec = vap->va_mtime.tv_sec;
1585 	sb->st_mtim.tv_nsec = vap->va_mtime.tv_nsec;
1586 	sb->st_ctim.tv_sec = vap->va_ctime.tv_sec;
1587 	sb->st_ctim.tv_nsec = vap->va_ctime.tv_nsec;
1588 	sb->st_birthtim.tv_sec = vap->va_birthtime.tv_sec;
1589 	sb->st_birthtim.tv_nsec = vap->va_birthtime.tv_nsec;
1590 
1591 	/*
1592 	 * According to www.opengroup.org, the meaning of st_blksize is
1593 	 *   "a filesystem-specific preferred I/O block size for this
1594 	 *    object.  In some filesystem types, this may vary from file
1595 	 *    to file"
1596 	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
1597 	 */
1598 
1599 	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);
1600 	sb->st_flags = vap->va_flags;
1601 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
1602 	sb->st_gen = vap->va_gen;
1603 out:
1604 	return (vop_stat_helper_post(a, error));
1605 }
1606 
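/*
 * Default read_pgcache: EJUSTRETURN tells the caller that the read could
 * not be served from the page cache alone and that it should fall back to
 * the regular, vnode-locked VOP_READ() path.
 */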
1607 static int
1608 vop_stdread_pgcache(struct vop_read_pgcache_args *ap __unused)
1609 {
1610 	return (EJUSTRETURN);
1611 }
1612 
1613 static int
1614 vop_stdvput_pair(struct vop_vput_pair_args *ap)
1615 {
1616 	struct vnode *dvp, *vp, **vpp;
1617 
1618 	dvp = ap->a_dvp;
1619 	vpp = ap->a_vpp;
1620 	vput(dvp);
1621 	if (vpp != NULL && ap->a_unlock_vp && (vp = *vpp) != NULL)
1622 		vput(vp);
1623 	return (0);
1624 }
1625