/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <security/audit/audit.h>
#include <sys/priv.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

/* The smallest valid dirent: the fixed header plus a 4-byte name area. */
#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

static int vop_stdis_text(struct vop_is_text_args *ap);
static int vop_stdunset_text(struct vop_unset_text_args *ap);
static int vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int vop_stdcopy_file_range(struct vop_copy_file_range_args *ap);
static int vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int vop_stdgetpages_async(struct vop_getpages_async_args *ap);
static int vop_stdread_pgcache(struct vop_read_pgcache_args *ap);
static int vop_stdstat(struct vop_stat_args *ap);
static int vop_stdvput_pair(struct vop_vput_pair_args *ap);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in an immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() and so on, recursing without bound.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_stat =		vop_stdstat,
	.vop_fdatasync =	vop_stdfdatasync,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_need_inactive =	vop_stdneed_inactive,
	.vop_ioctl =		vop_stdioctl,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_read_pgcache =	vop_stdread_pgcache,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_add_writecount =	vop_stdadd_writecount,
	.vop_copy_file_range =	vop_stdcopy_file_range,
	.vop_vput_pair =	vop_stdvput_pair,
};
VFS_VOP_VECTOR_REGISTER(default_vnodeops);
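
/*
 * Illustrative sketch (not compiled): how a filesystem typically chains to
 * the defaults above.  Any VOP missing from its own vector falls through
 * .vop_default to default_vnodeops; "examplefs" and its methods are
 * hypothetical names, not part of this file.
 */
#if 0
static struct vop_vector examplefs_vnodeops = {
	.vop_default =		&default_vnodeops,
	.vop_access =		examplefs_access,	/* or rely on vop_stdaccess */
	.vop_lookup =		examplefs_lookup,	/* override vop_nolookup */
	.vop_readdir =		examplefs_readdir,
};
VFS_VOP_VECTOR_REGISTER(examplefs_vnodeops);
#endif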

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_eagain(struct vop_generic_args *ap)
{

	return (EAGAIN);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * Documentation for the operations they implement, where it exists, is in
 * the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(struct vop_lookup_args *ap)
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy(struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}
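
/*
 * Sketch (not compiled) of the buffer state handling described in the
 * comment above vop_nostrategy(): a read path clears BIO_ERROR and B_INVAL
 * before handing the buffer to the strategy routine.  The function name is
 * hypothetical; it only illustrates the convention.
 */
#if 0
static void
example_start_read(struct vnode *vp, struct buf *bp)
{

	bp->b_ioflags &= ~BIO_ERROR;	/* must be clear for any strategy call */
	bp->b_flags &= ~B_INVAL;	/* typical for a BIO_READ request */
	bp->b_iocmd = BIO_READ;
	VOP_STRATEGY(vp, bp);
}
#endif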

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* Check for a malformed directory entry. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	/* On any error, report the name as not found. */
	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}
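
/*
 * Worked example of the SEEK_END handling above: for a lock request with
 * l_whence == SEEK_END and l_start == -10 on a 100-byte file, lf_advlock()
 * uses the va_size fetched here (100) to compute the absolute start offset
 * 90.  For SEEK_SET/SEEK_CUR requests the size is unused, so 0 is passed
 * without taking the vnode lock.
 */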

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(struct vop_pathconf_args *ap)
{

	switch (ap->a_name) {
	case _PC_ASYNC_IO:
		*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_ACL_EXTENDED:
	case _PC_ACL_NFS4:
	case _PC_CAP_PRESENT:
	case _PC_INF_PRESENT:
	case _PC_MAC_PRESENT:
		*ap->a_retval = 0;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
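
/*
 * Sketch (not compiled) of the per-filesystem override mentioned above: a
 * hypothetical filesystem with an 8-bit link count narrows _PC_LINK_MAX and
 * defers everything else to vop_stdpathconf().
 */
#if 0
static int
examplefs_pathconf(struct vop_pathconf_args *ap)
{

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = UCHAR_MAX;	/* fs-specific, smaller limit */
		return (0);
	default:
		return (vop_stdpathconf(ap));
	}
}
#endif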

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr_unlock(vp->v_vnlock));
}

/* See above. */
int
vop_stdislocked(struct vop_islocked_args *ap)
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Variants of the above set.
 *
 * Differences are:
 * - shared locking disablement is not supported
 * - v_vnlock pointer is not honored
 */
int
vop_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct mtx *ilk;

	MPASS(vp->v_vnlock == &vp->v_lock);

	if (__predict_false((flags & ~(LK_TYPE_MASK | LK_NODDLKTREAT | LK_RETRY)) != 0))
		goto other;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		return (lockmgr_slock(&vp->v_lock, flags, ap->a_file, ap->a_line));
	case LK_EXCLUSIVE:
		return (lockmgr_xlock(&vp->v_lock, flags, ap->a_file, ap->a_line));
	}
other:
	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(&vp->v_lock, flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

int
vop_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockmgr_unlock(&vp->v_lock));
}

int
vop_islocked(struct vop_islocked_args *ap)
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockstatus(&vp->v_lock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(struct vop_poll_args *ap)
{

	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(struct vop_poll_args *ap)
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(struct vop_getwritemount_args *ap)
{
	struct mount *mp;
	struct mount_pcpu *mpcpu;
	struct vnode *vp;

	/*
	 * Note that having a reference does not prevent forced unmount from
	 * setting ->v_mount to NULL after the lock gets released. This is of
	 * no consequence for typical consumers (most notably vn_start_write)
	 * since in this case the vnode is VIRF_DOOMED. Unmount might have
	 * progressed far enough that its completion is only delayed by the
	 * reference obtained here. The consumer only needs to concern itself
	 * with releasing it.
	 */
	vp = ap->a_vp;
	mp = vp->v_mount;
	if (mp == NULL) {
		*(ap->a_mpp) = NULL;
		return (0);
	}
	if (vfs_op_thread_enter(mp, mpcpu)) {
		if (mp == vp->v_mount) {
			vfs_mp_count_add_pcpu(mpcpu, ref, 1);
			vfs_op_thread_exit(mp, mpcpu);
		} else {
			vfs_op_thread_exit(mp, mpcpu);
			mp = NULL;
		}
	} else {
		MNT_ILOCK(mp);
		if (mp == vp->v_mount) {
			MNT_REF(mp);
			MNT_IUNLOCK(mp);
		} else {
			MNT_IUNLOCK(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}
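
/*
 * Sketch (not compiled) of the consumer pattern described above: the caller
 * receives a referenced mount point (or NULL) and is only responsible for
 * dropping that reference when done.  The function name is hypothetical.
 */
#if 0
static void
example_consumer(struct vnode *vp)
{
	struct mount *mp;

	if (VOP_GETWRITEMOUNT(vp, &mp) == 0 && mp != NULL) {
		/* ... operate on the mount point ... */
		vfs_rel(mp);	/* release the reference obtained for us */
	}
}
#endif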

/*
 * If the file system doesn't implement VOP_BMAP, then return sensible defaults:
 * - Return the vnode's bufobj instead of any underlying device's bufobj
 * - Calculate the physical block number as if there were equal size
 *   consecutive blocks, but
 * - Report no contiguous runs of blocks.
 */
int
vop_stdbmap(struct vop_bmap_args *ap)
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
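
/*
 * Worked example of the mapping above: with an f_iosize of 32768 and
 * DEV_BSIZE of 512, btodb(32768) == 64, so logical block 10 maps to
 * "physical" block 640 -- equal-sized consecutive blocks, with no runs
 * reported in either direction.
 */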

int
vop_stdfsync(struct vop_fsync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, ap->a_waitfor));
}

static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, MNT_WAIT));
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(struct vop_getpages_args *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL));
}

static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	if (ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return (vfs_kqfilter(ap));
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(struct vop_putpages_args *ap)
{

	return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals));
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred;
	char *buf = ap->a_buf;
	size_t *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;
	cred = td->td_ucred;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs *sfs;
	off_t maxfilesize = 0;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > maxphys)
		iosize = maxphys;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
	error = VFS_STATFS(vp->v_mount, sfs, td);
	if (error == 0)
		maxfilesize = sfs->f_maxfilesize;
	free(sfs, M_STATFS);
	if (error != 0)
		goto out;
	if (maxfilesize) {
		if (offset > maxfilesize || len > maxfilesize ||
		    offset + len > maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

 out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}
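
/*
 * Sketch (not compiled) of how a caller drives the loop above: because
 * vop_stdallocate() may stop early at should_yield(), *a_len and *a_offset
 * are written back so the caller can simply re-invoke until the remaining
 * length reaches zero.  Assumes vp is locked and VOP_ALLOCATE() takes
 * (vp, offsetp, lenp) as at this revision; lock juggling around the yield
 * is elided.
 */
#if 0
static int
example_allocate(struct vnode *vp, off_t offset, off_t len)
{
	int error;

	do {
		error = VOP_ALLOCATE(vp, &offset, &len);
	} while (error == 0 && len > 0);
	return (error);
}
#endif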

int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	daddr_t startn, endn;
	off_t bstart, bend, start, end;
	int bsize, error;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		error = 0;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (VN_IS_DOOMED(vp)) {
			VOP_UNLOCK(vp);
			break;
		}

		/*
		 * Round to block boundaries (and later possibly further to
		 * page boundaries).  Applications cannot reasonably be aware
		 * of the boundaries, and the rounding must be to expand at
		 * both extremities to cover enough.  It still doesn't cover
		 * read-ahead.  For partial blocks, this gives unnecessary
		 * discarding of buffers but is efficient enough since the
		 * pages usually remain in VMIO for some time.
		 */
		bsize = vp->v_bufobj.bo_bsize;
		bstart = rounddown(ap->a_start, bsize);
		bend = roundup(ap->a_end, bsize);

		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.  Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
		 */
		if (vp->v_object != NULL) {
			start = trunc_page(bstart);
			end = round_page(bend);
			VM_OBJECT_RLOCK(vp->v_object);
			vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_RUNLOCK(vp->v_object);
		}

		bo = &vp->v_bufobj;
		BO_RLOCK(bo);
		startn = bstart / bsize;
		endn = bend / bsize;
		error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
		if (error == 0)
			error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
		BO_RUNLOCK(bo);
		VOP_UNLOCK(vp);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
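
/*
 * Worked example of the POSIX_FADV_DONTNEED rounding above: with a
 * buffer-cache block size of 16384, a request for [1000, 20000) gives
 * bstart = rounddown(1000, 16384) = 0 and bend = roundup(20000, 16384) =
 * 32768, expanding at both ends so that whole buffers (and, after page
 * rounding, whole pages) are covered.
 */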

int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

	ap->a_vp->v_unpcb = ap->a_unpcb;
	return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

	*ap->a_unpcb = ap->a_vp->v_unpcb;
	return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

	ap->a_vp->v_unpcb = NULL;
	return (0);
}

static int
vop_stdis_text(struct vop_is_text_args *ap)
{

	return (ap->a_vp->v_writecount < 0);
}

int
vop_stdset_text(struct vop_set_text_args *ap)
{
	struct vnode *vp;
	struct mount *mp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	if (vp->v_writecount > 0) {
		error = ETXTBSY;
	} else {
		/*
		 * If requested by fs, keep a use reference to the
		 * vnode until the last text reference is released.
		 */
		mp = vp->v_mount;
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_TEXT_REFS) != 0 &&
		    vp->v_writecount == 0) {
			VNPASS((vp->v_iflag & VI_TEXT_REF) == 0, vp);
			vp->v_iflag |= VI_TEXT_REF;
			vrefl(vp);
		}

		vp->v_writecount--;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

static int
vop_stdunset_text(struct vop_unset_text_args *ap)
{
	struct vnode *vp;
	int error;
	bool last;

	vp = ap->a_vp;
	last = false;
	VI_LOCK(vp);
	if (vp->v_writecount < 0) {
		if ((vp->v_iflag & VI_TEXT_REF) != 0 &&
		    vp->v_writecount == -1) {
			last = true;
			vp->v_iflag &= ~VI_TEXT_REF;
		}
		vp->v_writecount++;
		error = 0;
	} else {
		error = EINVAL;
	}
	VI_UNLOCK(vp);
	if (last)
		vunref(vp);
	return (error);
}
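
/*
 * Worked example of the v_writecount encoding used by the text hooks above:
 * the count is positive while writers have the vnode open and negative
 * while it backs executable mappings.  Two VOP_SET_TEXT() calls take a
 * fresh vnode from 0 to -2; each VOP_UNSET_TEXT() adds one back, and the
 * use reference taken for MNTK_TEXT_REFS mounts is dropped when the count
 * returns from -1 to 0.
 */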

static int
vop_stdadd_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *vp;
	struct mount *mp;
	int error;

	vp = ap->a_vp;
	VI_LOCK_FLAGS(vp, MTX_DUPOK);
	if (vp->v_writecount < 0) {
		error = ETXTBSY;
	} else {
		VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
		    ("neg writecount increment %d", ap->a_inc));
		if (vp->v_writecount == 0) {
			mp = vp->v_mount;
			if (mp != NULL && (mp->mnt_kern_flag & MNTK_NOMSYNC) == 0)
				vlazy(vp);
		}
		vp->v_writecount += ap->a_inc;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

int
vop_stdneed_inactive(struct vop_need_inactive_args *ap)
{

	return (1);
}

int
vop_stdioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp;
	struct vattr va;
	off_t *offp;
	int error;

	switch (ap->a_command) {
	case FIOSEEKDATA:
	case FIOSEEKHOLE:
		vp = ap->a_vp;
		error = vn_lock(vp, LK_SHARED);
		if (error != 0)
			return (EBADF);
		if (vp->v_type == VREG)
			error = VOP_GETATTR(vp, &va, ap->a_cred);
		else
			error = ENOTTY;
		if (error == 0) {
			offp = ap->a_data;
			if (*offp < 0 || *offp >= va.va_size)
				error = ENXIO;
			else if (ap->a_command == FIOSEEKHOLE)
				*offp = va.va_size;
		}
		VOP_UNLOCK(vp);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
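
/*
 * Worked example of the default FIOSEEKDATA/FIOSEEKHOLE behavior above: the
 * whole file is treated as data with a single virtual hole at EOF.  On a
 * 1000-byte regular file, FIOSEEKDATA at offset 10 returns 10 unchanged,
 * FIOSEEKHOLE at offset 10 returns 1000, and either request at offset 1000
 * or beyond fails with ENXIO.
 */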

/*
 * VFS default ops: used to fill the VFS function table with reasonable
 * default return values.
 */
int
vfs_stdroot(struct mount *mp, int flags, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid, void *arg)
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}

int
vfs_stdnosync(struct mount *mp, int waitfor)
{

	return (0);
}

static int
vop_stdcopy_file_range(struct vop_copy_file_range_args *ap)
{
	int error;

	error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp,
	    ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags, ap->a_incred,
	    ap->a_outcred, ap->a_fsizetd);
	return (error);
}

int
vfs_stdvget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit(struct vfsconf *vfsp)
{

	return (0);
}

int
vfs_stduninit(struct vfsconf *vfsp)
{

	return (0);
}

int
vfs_stdextattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(struct mount *mp, fsctlop_t op, struct sysctl_req *req)
{

	return (EOPNOTSUPP);
}

static vop_bypass_t *
bp_by_off(struct vop_vector *vop, struct vop_generic_args *a)
{

	return (*(vop_bypass_t **)((char *)vop + a->a_desc->vdesc_vop_offset));
}

int
vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a)
{
	vop_bypass_t *bp;
	int prev_stops, rc;

	bp = bp_by_off(vop, a);
	MPASS(bp != NULL);

	prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT);
	rc = bp(a);
	sigallowstop(prev_stops);
	return (rc);
}
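
/*
 * Sketch (not compiled) of how vop_sigdefer() is meant to be used: a
 * filesystem whose operations must not be interrupted by stop signals
 * wraps its real vector behind a bypass.  "examplefs" and the wrapped
 * examplefs_real_vnodeops vector are hypothetical names.
 */
#if 0
static struct vop_vector examplefs_real_vnodeops;	/* the fs's real methods */

static int
examplefs_sigdefer_bypass(struct vop_generic_args *a)
{

	return (vop_sigdefer(&examplefs_real_vnodeops, a));
}

static struct vop_vector examplefs_vnodeops = {
	.vop_default =	&default_vnodeops,
	.vop_bypass =	examplefs_sigdefer_bypass,
};
#endif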

static int
vop_stdstat(struct vop_stat_args *a)
{
	struct vattr vattr;
	struct vattr *vap;
	struct vnode *vp;
	struct stat *sb;
	int error;
	u_short mode;

	vp = a->a_vp;
	sb = a->a_sb;

	error = vop_stat_helper_pre(a);
	if (error != 0)
		return (error);

	vap = &vattr;

	/*
	 * Initialize defaults for new and unusual fields, so that file
	 * systems which don't support these fields don't need to know
	 * about them.
	 */
	vap->va_birthtime.tv_sec = -1;
	vap->va_birthtime.tv_nsec = 0;
	vap->va_fsid = VNOVAL;
	vap->va_rdev = NODEV;

	error = VOP_GETATTR(vp, vap, a->a_active_cred);
	if (error)
		goto out;

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		error = EBADF;
		goto out;
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX) {
		error = EOVERFLOW;
		goto out;
	}
	sb->st_size = vap->va_size;
	sb->st_atim.tv_sec = vap->va_atime.tv_sec;
	sb->st_atim.tv_nsec = vap->va_atime.tv_nsec;
	sb->st_mtim.tv_sec = vap->va_mtime.tv_sec;
	sb->st_mtim.tv_nsec = vap->va_mtime.tv_nsec;
	sb->st_ctim.tv_sec = vap->va_ctime.tv_sec;
	sb->st_ctim.tv_nsec = vap->va_ctime.tv_nsec;
	sb->st_birthtim.tv_sec = vap->va_birthtime.tv_sec;
	sb->st_birthtim.tv_nsec = vap->va_birthtime.tv_nsec;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
	 */

	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);
	sb->st_flags = vap->va_flags;
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_gen = vap->va_gen;
out:
	return (vop_stat_helper_post(a, error));
}

static int
vop_stdread_pgcache(struct vop_read_pgcache_args *ap __unused)
{
	return (EJUSTRETURN);
}

static int
vop_stdvput_pair(struct vop_vput_pair_args *ap)
{
	struct vnode *dvp, *vp, **vpp;

	dvp = ap->a_dvp;
	vpp = ap->a_vpp;
	vput(dvp);
	if (vpp != NULL && ap->a_unlock_vp && (vp = *vpp) != NULL)
		vput(vp);
	return (0);
}