/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <security/audit/audit.h>
#include <sys/priv.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

static int vop_stdis_text(struct vop_is_text_args *ap);
static int vop_stdunset_text(struct vop_unset_text_args *ap);
static int vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int vop_stdcopy_file_range(struct vop_copy_file_range_args *ap);
static int vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int vop_stdgetpages_async(struct vop_getpages_async_args *ap);
static int vop_stdread_pgcache(struct vop_read_pgcache_args *ap);
static int vop_stdstat(struct vop_stat_args *ap);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in an immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() again, and so on.
 */

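/*
 * A filesystem typically chains to these defaults by pointing vop_default
 * at this table and overriding only the operations it implements itself.
 * An illustrative sketch (the "foofs" names are hypothetical):
 *
 *	struct vop_vector foofs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	foofs_lookup,
 *		.vop_readdir =	foofs_readdir,
 *	};
 *	VFS_VOP_VECTOR_REGISTER(foofs_vnodeops);
 */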
struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_stat =		vop_stdstat,
	.vop_fdatasync =	vop_stdfdatasync,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_need_inactive =	vop_stdneed_inactive,
	.vop_ioctl =		vop_stdioctl,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_read_pgcache =	vop_stdread_pgcache,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_add_writecount =	vop_stdadd_writecount,
	.vop_copy_file_range =	vop_stdcopy_file_range,
};
VFS_VOP_VECTOR_REGISTER(default_vnodeops);

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_eagain(struct vop_generic_args *ap)
{

	return (EAGAIN);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * Documentation for the operations they implement is found (when it
 * exists) in the corresponding VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(struct vop_lookup_args /* {
	struct vnode *a_dvp;
	struct vnode **a_vpp;
	struct componentname *a_cnp;
} */ *ap)
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement the rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  This is typically done for a BIO_READ strategy call.
 *	B_INVAL is usually assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy(struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

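/*
 * Fetch the next directory entry from the buffered stream at *cpos.  When
 * the local buffer is exhausted (*len == 0), refill it from the directory
 * vnode with VOP_READDIR().  On success *dpp points at the entry and *cpos
 * and *len are advanced past it; callers loop until *len drops to zero and
 * *eofflag is set.
 */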
static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* Check for a malformed directory. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

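/*
 * Default VOP_ACCESS(9) and VOP_ACCESSX(9): each is implemented in terms
 * of the other, so a filesystem must provide at least one of them (see the
 * stack-overflow note above default_vnodeops).
 */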
int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override it per-filesystem where the filesystem has smaller limits.
 */
int
vop_stdpathconf(struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
} */ *ap)
{

	switch (ap->a_name) {
	case _PC_ASYNC_IO:
		*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_ACL_EXTENDED:
	case _PC_ACL_NFS4:
	case _PC_CAP_PRESENT:
	case _PC_INF_PRESENT:
	case _PC_MAC_PRESENT:
		*ap->a_retval = 0;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(struct vop_lock1_args /* {
	struct vnode *a_vp;
	int a_flags;
	char *file;
	int line;
} */ *ap)
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(struct vop_unlock_args /* {
	struct vnode *a_vp;
} */ *ap)
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr_unlock(vp->v_vnlock));
}

/* See above. */
int
vop_stdislocked(struct vop_islocked_args /* {
	struct vnode *a_vp;
} */ *ap)
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Variants of the above set.
 *
 * Differences are:
 * - shared locking disablement is not supported
 * - v_vnlock pointer is not honored
 */
int
vop_lock(struct vop_lock1_args /* {
	struct vnode *a_vp;
	int a_flags;
	char *file;
	int line;
} */ *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct mtx *ilk;

	MPASS(vp->v_vnlock == &vp->v_lock);

	if (__predict_false((flags & ~(LK_TYPE_MASK | LK_NODDLKTREAT |
	    LK_RETRY)) != 0))
		goto other;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		return (lockmgr_slock(&vp->v_lock, flags, ap->a_file,
		    ap->a_line));
	case LK_EXCLUSIVE:
		return (lockmgr_xlock(&vp->v_lock, flags, ap->a_file,
		    ap->a_line));
	}
other:
	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(&vp->v_lock, flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

int
vop_unlock(struct vop_unlock_args /* {
	struct vnode *a_vp;
} */ *ap)
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockmgr_unlock(&vp->v_lock));
}

int
vop_islocked(struct vop_islocked_args /* {
	struct vnode *a_vp;
} */ *ap)
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockstatus(&vp->v_lock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(struct vop_poll_args /* {
	struct vnode *a_vp;
	int  a_events;
	struct ucred *a_cred;
	struct thread *a_td;
} */ *ap)
{

	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(struct vop_poll_args /* {
	struct vnode *a_vp;
	int  a_events;
	struct ucred *a_cred;
	struct thread *a_td;
} */ *ap)
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(struct vop_getwritemount_args /* {
	struct vnode *a_vp;
	struct mount **a_mpp;
} */ *ap)
{
	struct mount *mp;
	struct mount_pcpu *mpcpu;
	struct vnode *vp;

	/*
	 * Note that having a reference does not prevent forced unmount from
	 * setting ->v_mount to NULL after the lock gets released. This is of
	 * no consequence for typical consumers (most notably vn_start_write)
	 * since in this case the vnode is VIRF_DOOMED. Unmount might have
	 * progressed far enough that its completion is only delayed by the
	 * reference obtained here. The consumer only needs to concern itself
	 * with releasing it.
	 */
	vp = ap->a_vp;
	mp = vp->v_mount;
	if (mp == NULL) {
		*(ap->a_mpp) = NULL;
		return (0);
	}
	if (vfs_op_thread_enter(mp, mpcpu)) {
		if (mp == vp->v_mount) {
			vfs_mp_count_add_pcpu(mpcpu, ref, 1);
			vfs_op_thread_exit(mp, mpcpu);
		} else {
			vfs_op_thread_exit(mp, mpcpu);
			mp = NULL;
		}
	} else {
		MNT_ILOCK(mp);
		if (mp == vp->v_mount) {
			MNT_REF(mp);
			MNT_IUNLOCK(mp);
		} else {
			MNT_IUNLOCK(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * If the file system doesn't implement VOP_BMAP, then return sensible defaults:
 * - Return the vnode's bufobj instead of any underlying device's bufobj
 * - Calculate the physical block number as if there were equal size
 *   consecutive blocks, but
 * - Report no contiguous runs of blocks.
 */
int
vop_stdbmap(struct vop_bmap_args /* {
	struct vnode *a_vp;
	daddr_t  a_bn;
	struct bufobj **a_bop;
	daddr_t *a_bnp;
	int *a_runp;
	int *a_runb;
} */ *ap)
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn *
		    btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

int
vop_stdfsync(struct vop_fsync_args /* {
	struct vnode *a_vp;
	int a_waitfor;
	struct thread *a_td;
} */ *ap)
{

	return (vn_fsync_buf(ap->a_vp, ap->a_waitfor));
}

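/*
 * Default VOP_FDATASYNC(9): no cheaper data-only sync is known, so fall
 * back to a full VOP_FSYNC(9).
 */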
static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, MNT_WAIT));
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(struct vop_getpages_args /* {
	struct vnode *a_vp;
	vm_page_t *a_m;
	int a_count;
	int *a_rbehind;
	int *a_rahead;
} */ *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL));
}

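/*
 * Default VOP_GETPAGES_ASYNC(9): perform the I/O synchronously through
 * VOP_GETPAGES(9), then invoke the caller's completion callback.
 */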
static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	if (ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return (vfs_kqfilter(ap));
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(struct vop_putpages_args /* {
	struct vnode *a_vp;
	vm_page_t *a_m;
	int a_count;
	int a_sync;
	int *a_rtvals;
} */ *ap)
{

	return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals));
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

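/*
 * Default VOP_VPTOCNP(9): find the name of the directory vp within its
 * parent.  The parent is located by opening "..", and its entries are
 * scanned with VOP_READDIR() for one whose file number matches vp's.  On
 * success the name is copied to the end of buf and *buflen is updated to
 * its offset.
 */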
int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred;
	char *buf = ap->a_buf;
	size_t *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;
	cred = td->td_ucred;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* Call VOP_READDIR of the parent. */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

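/*
 * Default VOP_ALLOCATE(9): implement file preallocation generically by
 * reading each block in the range (or zeroing past EOF) and writing it
 * back, which forces the filesystem to allocate the backing blocks.
 */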
int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs *sfs;
	off_t maxfilesize = 0;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > maxphys)
		iosize = maxphys;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
	error = VFS_STATFS(vp->v_mount, sfs, td);
	if (error == 0)
		maxfilesize = sfs->f_maxfilesize;
	free(sfs, M_STATFS);
	if (error != 0)
		goto out;
	if (maxfilesize) {
		if (offset > maxfilesize || len > maxfilesize ||
		    offset + len > maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}

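/*
 * Default VOP_ADVISE(9): POSIX_FADV_WILLNEED is a no-op here;
 * POSIX_FADV_DONTNEED deactivates the pages and buffers backing the range.
 */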
int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	daddr_t startn, endn;
	off_t bstart, bend, start, end;
	int bsize, error;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		error = 0;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (VN_IS_DOOMED(vp)) {
			VOP_UNLOCK(vp);
			break;
		}

		/*
		 * Round to block boundaries (and later possibly further to
		 * page boundaries).  Applications cannot reasonably be aware
		 * of the boundaries, and the rounding must be to expand at
		 * both extremities to cover enough.  It still doesn't cover
		 * read-ahead.  For partial blocks, this gives unnecessary
		 * discarding of buffers but is efficient enough since the
		 * pages usually remain in VMIO for some time.
		 */
		bsize = vp->v_bufobj.bo_bsize;
		bstart = rounddown(ap->a_start, bsize);
		bend = roundup(ap->a_end, bsize);

		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.  Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
		 */
		if (vp->v_object != NULL) {
			start = trunc_page(bstart);
			end = round_page(bend);
			VM_OBJECT_RLOCK(vp->v_object);
			vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_RUNLOCK(vp->v_object);
		}

		bo = &vp->v_bufobj;
		BO_RLOCK(bo);
		startn = bstart / bsize;
		endn = bend / bsize;
		error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
		if (error == 0)
			error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
		BO_RUNLOCK(bo);
		VOP_UNLOCK(vp);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

	ap->a_vp->v_unpcb = ap->a_unpcb;
	return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

	*ap->a_unpcb = ap->a_vp->v_unpcb;
	return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

	ap->a_vp->v_unpcb = NULL;
	return (0);
}

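/*
 * Text references are tracked as negative values of v_writecount: each
 * VOP_SET_TEXT() decrements the count and each VOP_UNSET_TEXT() increments
 * it, so a vnode cannot be both mapped for execution and open for writing.
 */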
static int
vop_stdis_text(struct vop_is_text_args *ap)
{

	return (ap->a_vp->v_writecount < 0);
}

int
vop_stdset_text(struct vop_set_text_args *ap)
{
	struct vnode *vp;
	struct mount *mp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	if (vp->v_writecount > 0) {
		error = ETXTBSY;
	} else {
		/*
		 * If requested by the fs, keep a use reference to the
		 * vnode until the last text reference is released.
		 */
		mp = vp->v_mount;
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_TEXT_REFS) != 0 &&
		    vp->v_writecount == 0) {
			VNPASS((vp->v_iflag & VI_TEXT_REF) == 0, vp);
			vp->v_iflag |= VI_TEXT_REF;
			vrefl(vp);
		}

		vp->v_writecount--;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

static int
vop_stdunset_text(struct vop_unset_text_args *ap)
{
	struct vnode *vp;
	int error;
	bool last;

	vp = ap->a_vp;
	last = false;
	VI_LOCK(vp);
	if (vp->v_writecount < 0) {
		if ((vp->v_iflag & VI_TEXT_REF) != 0 &&
		    vp->v_writecount == -1) {
			last = true;
			vp->v_iflag &= ~VI_TEXT_REF;
		}
		vp->v_writecount++;
		error = 0;
	} else {
		error = EINVAL;
	}
	VI_UNLOCK(vp);
	if (last)
		vunref(vp);
	return (error);
}

static int
vop_stdadd_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *vp;
	struct mount *mp;
	int error;

	vp = ap->a_vp;
	VI_LOCK_FLAGS(vp, MTX_DUPOK);
	if (vp->v_writecount < 0) {
		error = ETXTBSY;
	} else {
		VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
		    ("neg writecount increment %d", ap->a_inc));
		if (vp->v_writecount == 0) {
			mp = vp->v_mount;
			if (mp != NULL && (mp->mnt_kern_flag & MNTK_NOMSYNC) == 0)
				vlazy(vp);
		}
		vp->v_writecount += ap->a_inc;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

int
vop_stdneed_inactive(struct vop_need_inactive_args *ap)
{

	return (1);
}

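/*
 * Default VOP_IOCTL(9): support FIOSEEKDATA/FIOSEEKHOLE for filesystems
 * that do not track holes by treating the file as fully dense; data is
 * found at any valid offset and the only hole is at the end of the file.
 */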
int
vop_stdioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp;
	struct vattr va;
	off_t *offp;
	int error;

	switch (ap->a_command) {
	case FIOSEEKDATA:
	case FIOSEEKHOLE:
		vp = ap->a_vp;
		error = vn_lock(vp, LK_SHARED);
		if (error != 0)
			return (EBADF);
		if (vp->v_type == VREG)
			error = VOP_GETATTR(vp, &va, ap->a_cred);
		else
			error = ENOTTY;
		if (error == 0) {
			offp = ap->a_data;
			if (*offp < 0 || *offp >= va.va_size)
				error = ENXIO;
			else if (ap->a_command == FIOSEEKHOLE)
				*offp = va.va_size;
		}
		VOP_UNLOCK(vp);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

/*
 * VFS default ops, used to fill the VFS function table with reasonable
 * default return values.
 */
int
vfs_stdroot(struct mount *mp, int flags, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid, void *arg)
{

	return (EOPNOTSUPP);
}

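/*
 * Default VFS_SYNC(9): flush out any dirty buffers cached on vnodes of the
 * mount, restarting the scan if the vnode list changes underneath it.
 */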
int
vfs_stdsync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}

int
vfs_stdnosync(struct mount *mp, int waitfor)
{

	return (0);
}

static int
vop_stdcopy_file_range(struct vop_copy_file_range_args *ap)
{
	int error;

	error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp,
	    ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags, ap->a_incred,
	    ap->a_outcred, ap->a_fsizetd);
	return (error);
}

int
vfs_stdvget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit(struct vfsconf *vfsp)
{

	return (0);
}

int
vfs_stduninit(struct vfsconf *vfsp)
{

	return (0);
}

int
vfs_stdextattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(struct mount *mp, fsctlop_t op, struct sysctl_req *req)
{

	return (EOPNOTSUPP);
}

static vop_bypass_t *
bp_by_off(struct vop_vector *vop, struct vop_generic_args *a)
{

	return (*(vop_bypass_t **)((char *)vop + a->a_desc->vdesc_vop_offset));
}

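/*
 * Bypass routine which runs the real VOP with stop signals deferred, for
 * filesystems whose operations must not be interrupted by a stop signal.
 */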
1468 vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a)
1469 {
1470 	vop_bypass_t *bp;
1471 	int prev_stops, rc;
1472 
1473 	bp = bp_by_off(vop, a);
1474 	MPASS(bp != NULL);
1475 
1476 	prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT);
1477 	rc = bp(a);
1478 	sigallowstop(prev_stops);
1479 	return (rc);
1480 }
1481 
1482 static int
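/*
 * Default VOP_STAT(9): build struct stat from the results of
 * VOP_GETATTR(9).
 */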
1483 vop_stdstat(struct vop_stat_args *a)
1484 {
1485 	struct vattr vattr;
1486 	struct vattr *vap;
1487 	struct vnode *vp;
1488 	struct stat *sb;
1489 	int error;
1490 	u_short mode;
1491 
1492 	vp = a->a_vp;
1493 	sb = a->a_sb;
1494 
1495 	error = vop_stat_helper_pre(a);
1496 	if (error != 0)
1497 		return (error);
1498 
1499 	vap = &vattr;
1500 
1501 	/*
1502 	 * Initialize defaults for new and unusual fields, so that file
1503 	 * systems which don't support these fields don't need to know
1504 	 * about them.
1505 	 */
1506 	vap->va_birthtime.tv_sec = -1;
1507 	vap->va_birthtime.tv_nsec = 0;
1508 	vap->va_fsid = VNOVAL;
1509 	vap->va_rdev = NODEV;
1510 
1511 	error = VOP_GETATTR(vp, vap, a->a_active_cred);
1512 	if (error)
1513 		goto out;
1514 
1515 	/*
1516 	 * Zero the spare stat fields
1517 	 */
1518 	bzero(sb, sizeof *sb);
1519 
1520 	/*
1521 	 * Copy from vattr table
1522 	 */
1523 	if (vap->va_fsid != VNOVAL)
1524 		sb->st_dev = vap->va_fsid;
1525 	else
1526 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
1527 	sb->st_ino = vap->va_fileid;
1528 	mode = vap->va_mode;
1529 	switch (vap->va_type) {
1530 	case VREG:
1531 		mode |= S_IFREG;
1532 		break;
1533 	case VDIR:
1534 		mode |= S_IFDIR;
1535 		break;
1536 	case VBLK:
1537 		mode |= S_IFBLK;
1538 		break;
1539 	case VCHR:
1540 		mode |= S_IFCHR;
1541 		break;
1542 	case VLNK:
1543 		mode |= S_IFLNK;
1544 		break;
1545 	case VSOCK:
1546 		mode |= S_IFSOCK;
1547 		break;
1548 	case VFIFO:
1549 		mode |= S_IFIFO;
1550 		break;
1551 	default:
1552 		error = EBADF;
1553 		goto out;
1554 	}
1555 	sb->st_mode = mode;
1556 	sb->st_nlink = vap->va_nlink;
1557 	sb->st_uid = vap->va_uid;
1558 	sb->st_gid = vap->va_gid;
1559 	sb->st_rdev = vap->va_rdev;
1560 	if (vap->va_size > OFF_MAX) {
1561 		error = EOVERFLOW;
1562 		goto out;
1563 	}
1564 	sb->st_size = vap->va_size;
1565 	sb->st_atim.tv_sec = vap->va_atime.tv_sec;
1566 	sb->st_atim.tv_nsec = vap->va_atime.tv_nsec;
1567 	sb->st_mtim.tv_sec = vap->va_mtime.tv_sec;
1568 	sb->st_mtim.tv_nsec = vap->va_mtime.tv_nsec;
1569 	sb->st_ctim.tv_sec = vap->va_ctime.tv_sec;
1570 	sb->st_ctim.tv_nsec = vap->va_ctime.tv_nsec;
1571 	sb->st_birthtim.tv_sec = vap->va_birthtime.tv_sec;
1572 	sb->st_birthtim.tv_nsec = vap->va_birthtime.tv_nsec;
1573 
1574 	/*
1575 	 * According to www.opengroup.org, the meaning of st_blksize is
1576 	 *   "a filesystem-specific preferred I/O block size for this
1577 	 *    object.  In some filesystem types, this may vary from file
1578 	 *    to file"
1579 	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
1580 	 */
1581 
1582 	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);
1583 	sb->st_flags = vap->va_flags;
1584 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
1585 	sb->st_gen = vap->va_gen;
1586 out:
1587 	return (vop_stat_helper_post(a, error));
1588 }
1589 
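/*
 * Default VOP_READ_PGCACHE(9): EJUSTRETURN signals the caller that the
 * lockless page cache read path is not supported and the regular
 * VOP_READ(9) path must be taken instead.
 */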
static int
vop_stdread_pgcache(struct vop_read_pgcache_args *ap __unused)
{
	return (EJUSTRETURN);
}
1595