/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

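/*
 * The smallest record that VOP_READDIR() may legitimately return: the
 * fixed dirent header (sizeof(struct dirent) less the d_name buffer of
 * MAXNAMLEN + 1 bytes) plus four bytes of name storage.  Shorter records
 * are treated as a malformed directory by get_next_dirent().
 */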
#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

static int vop_stdis_text(struct vop_is_text_args *ap);
static int vop_stdunset_text(struct vop_unset_text_args *ap);
static int vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int vop_stdcopy_file_range(struct vop_copy_file_range_args *ap);
static int vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int vop_stdgetpages_async(struct vop_getpages_async_args *ap);
static int vop_stdioctl(struct vop_ioctl_args *ap);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in an immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess(), and so on indefinitely.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_fdatasync =	vop_stdfdatasync,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_need_inactive =	vop_stdneed_inactive,
	.vop_ioctl =		vop_stdioctl,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_add_writecount =	vop_stdadd_writecount,
	.vop_copy_file_range =	vop_stdcopy_file_range,
};
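
/*
 * A typical filesystem implements only the operations it cares about and
 * defers everything else to this table through its vop_default slot; per
 * the note above, at least one of vop_access/vop_accessx must be among the
 * implemented operations.  A minimal sketch ("myfs" and its handlers are
 * hypothetical names, not part of this file):
 *
 *	static struct vop_vector myfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_access =	myfs_access,
 *		.vop_lookup =	myfs_lookup,
 *		.vop_readdir =	myfs_readdir,
 *	};
 */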

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * Documentation for the operations they implement, where it exists, can be
 * found in the corresponding VOP_<SOMETHING>(9) manual page (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(struct vop_lookup_args *ap)
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for the arguments of vop_rename
 * for filesystems that do not implement the rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  This is typically done for a BIO_READ strategy call.
 *	B_INVAL is usually assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy(struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* Check for a malformed directory entry. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}
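
/*
 * Usage note (describing the callers below): initialize *off and *len to
 * zero, then call get_next_dirent() in a loop guarded by
 * (len > 0 || !eofflag); the directory buffer is transparently refilled
 * from VOP_READDIR() whenever *len drops to zero.
 */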

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf(2), returning information about
 * filesystem limits.  Filesystems with smaller limits should override this
 * on a per-filesystem basis.
 */
int
vop_stdpathconf(struct vop_pathconf_args *ap)
{

	switch (ap->a_name) {
	case _PC_ASYNC_IO:
		*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_ACL_EXTENDED:
	case _PC_ACL_NFS4:
	case _PC_CAP_PRESENT:
	case _PC_INF_PRESENT:
	case _PC_MAC_PRESENT:
		*ap->a_retval = 0;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_lock_fast_path(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_unlock_fast_path(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object));
}

/* See above. */
int
vop_stdislocked(struct vop_islocked_args *ap)
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(struct vop_poll_args *ap)
{

	return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(struct vop_poll_args *ap)
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(struct vop_getwritemount_args *ap)
{
	struct mount *mp;
	struct vnode *vp;

	/*
	 * Note that having a reference does not prevent forced unmount from
	 * setting ->v_mount to NULL after the lock gets released. This is of
	 * no consequence for typical consumers (most notably vn_start_write)
	 * since in this case the vnode is VI_DOOMED. Unmount might have
	 * progressed far enough that its completion is only delayed by the
	 * reference obtained here. The consumer only needs to concern itself
	 * with releasing it.
	 */
	vp = ap->a_vp;
	mp = vp->v_mount;
	if (mp == NULL) {
		/* A doomed vnode may have no mount point; report none. */
		*(ap->a_mpp) = NULL;
		return (0);
	}
	MNT_ILOCK(mp);
	if (mp != vp->v_mount) {
		MNT_IUNLOCK(mp);
		mp = NULL;
		goto out;
	}
	MNT_REF(mp);
	MNT_IUNLOCK(mp);
out:
	*(ap->a_mpp) = mp;
	return (0);
}
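
/*
 * A consumer sketch (not a verbatim copy of any caller): the reference
 * obtained above is dropped with vfs_rel() once the mount point is no
 * longer needed, e.g.
 *
 *	VOP_GETWRITEMOUNT(vp, &mp);
 *	if (mp != NULL) {
 *		... start the write ...
 *		vfs_rel(mp);
 *	}
 */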

/*
 * If the file system doesn't implement VOP_BMAP, then return sensible defaults:
 * - Return the vnode's bufobj instead of any underlying device's bufobj
 * - Calculate the physical block number as if there were equal size
 *   consecutive blocks, but
 * - Report no contiguous runs of blocks.
 */
int
vop_stdbmap(struct vop_bmap_args *ap)
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
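
/*
 * For example, with an f_iosize of 32768 bytes and 512-byte disk blocks,
 * btodb(32768) == 64, so logical block a_bn maps to physical block
 * a_bn * 64, with no runs of contiguous blocks reported around it.
 */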

int
vop_stdfsync(struct vop_fsync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, ap->a_waitfor));
}

static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, MNT_WAIT));
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(struct vop_getpages_args *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL));
}

static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return (vfs_kqfilter(ap));
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(struct vop_putpages_args *ap)
{

	return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals));
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* Call VOP_READDIR of the parent. */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}
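
/*
 * Note on the convention above: the component name is copied to the tail
 * of the caller-supplied buffer and *buflen is updated to its starting
 * offset, which lets vn_fullpath-style callers prepend successive
 * components while walking toward the root.
 */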

int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs *sfs;
	off_t maxfilesize = 0;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > MAXPHYS)
		iosize = MAXPHYS;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
	error = VFS_STATFS(vp->v_mount, sfs, td);
	if (error == 0)
		maxfilesize = sfs->f_maxfilesize;
	free(sfs, M_STATFS);
	if (error != 0)
		goto out;
	if (maxfilesize) {
		if (offset > maxfilesize || len > maxfilesize ||
		    offset + len > maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

 out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}
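
/*
 * Note on the contract above: a break on should_yield() returns with the
 * remaining length and the updated offset stored back through a_len and
 * a_offset, so the caller (e.g. the posix_fallocate(2) path) can yield
 * the CPU and simply re-invoke VOP_ALLOCATE() until *a_len reaches zero.
 */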

int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	daddr_t startn, endn;
	off_t bstart, bend, start, end;
	int bsize, error;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		error = 0;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (vp->v_iflag & VI_DOOMED) {
			VOP_UNLOCK(vp, 0);
			break;
		}

		/*
		 * Round to block boundaries (and later possibly further to
		 * page boundaries).  Applications cannot reasonably be aware
		 * of the boundaries, and the rounding must expand the range
		 * at both extremities to cover enough.  It still doesn't
		 * cover read-ahead.  For partial blocks, this gives
		 * unnecessary discarding of buffers but is efficient enough
		 * since the pages usually remain in VMIO for some time.
		 */
		bsize = vp->v_bufobj.bo_bsize;
		bstart = rounddown(ap->a_start, bsize);
		bend = roundup(ap->a_end, bsize);

		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.  Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
		 */
		if (vp->v_object != NULL) {
			start = trunc_page(bstart);
			end = round_page(bend);
			VM_OBJECT_RLOCK(vp->v_object);
			vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_RUNLOCK(vp->v_object);
		}

		bo = &vp->v_bufobj;
		BO_RLOCK(bo);
		startn = bstart / bsize;
		endn = bend / bsize;
		error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
		if (error == 0)
			error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
		BO_RUNLOCK(bo);
		VOP_UNLOCK(vp, 0);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
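
/*
 * Rounding example for the DONTNEED case (illustrative numbers): with
 * bo_bsize == 16384, a request for bytes [1000, 50000) becomes
 * bstart = 0 and bend = 65536, i.e. logical blocks 0 through 4, and the
 * covering page range after trunc_page()/round_page().
 */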

int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

	ap->a_vp->v_unpcb = ap->a_unpcb;
	return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

	*ap->a_unpcb = ap->a_vp->v_unpcb;
	return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

	ap->a_vp->v_unpcb = NULL;
	return (0);
}

static int
vop_stdis_text(struct vop_is_text_args *ap)
{

	return (ap->a_vp->v_writecount < 0);
}
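
/*
 * v_writecount doubles as the text-reference count: each VOP_SET_TEXT()
 * drops it by one and each VOP_UNSET_TEXT() raises it by one, so a vnode
 * mapped executable twice with no writers sits at -2.  A negative count
 * therefore means "in use as a text image", and vop_stdadd_writecount()
 * below refuses to add writers (ETXTBSY) while it is negative.
 */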

int
vop_stdset_text(struct vop_set_text_args *ap)
{
	struct vnode *vp;
	struct mount *mp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	if (vp->v_writecount > 0) {
		error = ETXTBSY;
	} else {
		/*
		 * If requested by fs, keep a use reference to the
		 * vnode until the last text reference is released.
		 */
		mp = vp->v_mount;
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_TEXT_REFS) != 0 &&
		    vp->v_writecount == 0) {
			vp->v_iflag |= VI_TEXT_REF;
			vrefl(vp);
		}

		vp->v_writecount--;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

static int
vop_stdunset_text(struct vop_unset_text_args *ap)
{
	struct vnode *vp;
	int error;
	bool last;

	vp = ap->a_vp;
	last = false;
	VI_LOCK(vp);
	if (vp->v_writecount < 0) {
		if ((vp->v_iflag & VI_TEXT_REF) != 0 &&
		    vp->v_writecount == -1) {
			last = true;
			vp->v_iflag &= ~VI_TEXT_REF;
		}
		vp->v_writecount++;
		error = 0;
	} else {
		error = EINVAL;
	}
	VI_UNLOCK(vp);
	if (last)
		vunref(vp);
	return (error);
}

static int
vop_stdadd_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *vp;
	int error;

	vp = ap->a_vp;
	VI_LOCK_FLAGS(vp, MTX_DUPOK);
	if (vp->v_writecount < 0) {
		error = ETXTBSY;
	} else {
		VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
		    ("neg writecount increment %d", ap->a_inc));
		vp->v_writecount += ap->a_inc;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

int
vop_stdneed_inactive(struct vop_need_inactive_args *ap)
{

	return (1);
}

static int
vop_stdioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp;
	struct vattr va;
	off_t *offp;
	int error;

	switch (ap->a_command) {
	case FIOSEEKDATA:
	case FIOSEEKHOLE:
		vp = ap->a_vp;
		error = vn_lock(vp, LK_SHARED);
		if (error != 0)
			return (EBADF);
		if (vp->v_type == VREG)
			error = VOP_GETATTR(vp, &va, ap->a_cred);
		else
			error = ENOTTY;
		if (error == 0) {
			offp = ap->a_data;
			if (*offp < 0 || *offp >= va.va_size)
				error = ENXIO;
			else if (ap->a_command == FIOSEEKHOLE)
				*offp = va.va_size;
		}
		VOP_UNLOCK(vp, 0);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
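
/*
 * The fallback above models a file with no holes: FIOSEEKDATA leaves a
 * valid offset untouched (the whole file is data), while FIOSEEKHOLE
 * reports the implicit hole at end of file by returning va_size.
 */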

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot(struct mount *mp, int flags, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid, void *arg)
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq, td)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}

int
vfs_stdnosync(struct mount *mp, int waitfor)
{

	return (0);
}

static int
vop_stdcopy_file_range(struct vop_copy_file_range_args *ap)
{
	int error;

	error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp,
	    ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags, ap->a_incred,
	    ap->a_outcred, ap->a_fsizetd);
	return (error);
}

int
vfs_stdvget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit(struct vfsconf *vfsp)
{

	return (0);
}

int
vfs_stduninit(struct vfsconf *vfsp)
{

	return (0);
}

int
vfs_stdextattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(struct mount *mp, fsctlop_t op, struct sysctl_req *req)
{

	return (EOPNOTSUPP);
}

static vop_bypass_t *
bp_by_off(struct vop_vector *vop, struct vop_generic_args *a)
{

	return (*(vop_bypass_t **)((char *)vop + a->a_desc->vdesc_vop_offset));
}
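
/*
 * vdesc_vop_offset is the byte offset of the operation's slot within
 * struct vop_vector, so the line above fetches, e.g., vop->vop_open when
 * the descriptor in "a" describes VOP_OPEN(); a NULL slot means the
 * vector does not implement that operation.
 */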

int
vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a)
{
	vop_bypass_t *bp;
	int prev_stops, rc;

	for (; vop != NULL; vop = vop->vop_default) {
		bp = bp_by_off(vop, a);
		if (bp != NULL)
			break;

		/*
		 * Bypass is not really supported here; it is only used to
		 * fall back to the unimplemented vops in the default
		 * vector.
		 */
		bp = vop->vop_bypass;
		if (bp != NULL)
			break;
	}
	MPASS(bp != NULL);

	prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT);
	rc = bp(a);
	sigallowstop(prev_stops);
	return (rc);
}