xref: /freebsd/sys/kern/vfs_default.c (revision 1e413cf93298b5b97441a21d9a50fdcd0ee9945e)
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * An illustrative sketch of how a filesystem chains to this table through
 * .vop_default follows the definition below.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_advlock =		VOP_EINVAL,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_getpages =		vop_stdgetpages,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lease =		VOP_NULL,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptofh =		vop_stdvptofh,
};
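
/*
 * Illustrative sketch: a filesystem that implements only a handful of
 * operations can chain to the table above through .vop_default, so any
 * VOP it leaves unset falls back to these defaults.  The "examplefs_*"
 * names below are hypothetical placeholders.
 *
 *	static struct vop_vector examplefs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	examplefs_lookup,
 *		.vop_getattr =	examplefs_getattr,
 *		.vop_read =	examplefs_read,
 *	};
 */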

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function used as the default for VOPs that a filesystem should
 * never reach; hitting it indicates a filesystem bug, so panic.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The operations they implement are documented, where documentation exists,
 * in the corresponding VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_NAME_MAX:
			*ap->a_retval = NAME_MAX;
			return (0);
		case _PC_PATH_MAX:
			*ap->a_retval = PATH_MAX;
			return (0);
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}
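
/*
 * For illustration only (standard POSIX usage, hypothetical path): these
 * limits surface in userland through pathconf(2)/fpathconf(2), e.g.
 *
 *	long name_max;
 *
 *	name_max = pathconf("/some/path", _PC_NAME_MAX);
 *	if (name_max == -1)
 *		err(1, "pathconf");
 *
 * Note that -1 can also mean the limit is indeterminate rather than an
 * error, so a careful caller checks errno as well.
 */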

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (_lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), curthread,
	     ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    curthread));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 * Stay in sync with kern_conf.c::no_poll().
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
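
/*
 * Illustrative sketch ("fd" stands for any open descriptor backed by such
 * a vnode): a plain poll(2) for readability or writability reports the
 * descriptor as immediately ready, while asking for anything beyond
 * POLLSTANDARD yields POLLNVAL as described above.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) == 1)
 *		assert(pfd.revents & POLLIN);
 */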

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * Default bmap: the vnode's own buffer object stands in for the underlying
 * device, and the logical block number is converted to DEV_BSIZE units using
 * the filesystem's I/O size; no read-ahead/read-behind runs are reported.
 * XXX A fuller description belongs in the VOP_BMAP(9) manpage.
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
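
/*
 * Worked example (assumed numbers, for illustration only): with an f_iosize
 * of 16384 bytes and DEV_BSIZE of 512, btodb(16384) is 32, so logical block
 * 5 maps to DEV_BSIZE block 5 * 32 = 160 within the vnode's own bufobj, and
 * the run lengths before and behind it are reported as zero.
 */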

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;     /* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		KASSERT(bp->b_bufobj == &vp->v_bufobj,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, &vp->v_bufobj));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bo = &vp->v_bufobj;
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}

/*
 * Default getpages: hand the request to the generic vnode pager.
 * XXX Needs more info in the manpage (VOP_GETPAGES(9)).
 */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}
/*
 * Default putpages: hand the request to the generic vnode pager.
 * XXX Needs more info in the manpage (VOP_PUTPAGES(9)).
 */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	     ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

/*
 * vfs default ops
 * Used to fill the vfs function table so that unimplemented operations get
 * reasonable default return values; an illustrative sketch of their use
 * follows this comment.
 */
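
/*
 * Illustrative sketch: a filesystem plugs whichever of these defaults it
 * wants into its own vfsops table; the "examplefs_*" names below are
 * hypothetical placeholders.
 *
 *	static struct vfsops examplefs_vfsops = {
 *		.vfs_mount =	examplefs_mount,
 *		.vfs_unmount =	examplefs_unmount,
 *		.vfs_root =	examplefs_root,
 *		.vfs_statfs =	examplefs_statfs,
 *		.vfs_sync =	vfs_stdsync,
 *		.vfs_vget =	vfs_stdvget,
 *		.vfs_fhtovp =	vfs_stdfhtovp,
 *	};
 *	VFS_SET(examplefs_vfsops, examplefs, 0);
 */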
int
vfs_stdroot (mp, flags, vpp, td)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, td)
	struct mount *mp;
	int waitfor;
	struct thread *td;
{
	struct vnode *vp, *mvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {

		VI_LOCK(vp);
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);

		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;

		/* Do not turn this into vput.  td is not always curthread. */
		VOP_UNLOCK(vp, 0);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor, td)
	struct mount *mp;
	int waitfor;
	struct thread *td;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return(0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */
679