xref: /freebsd/sys/kern/vfs_default.c (revision f35e5d0ef0a10ebda81a076bbd838d12b916dab5)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed
6  * to Berkeley by John Heidemann of the UCLA Ficus project.
7  *
8  * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *
39  * $FreeBSD$
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/buf.h>
45 #include <sys/kernel.h>
46 #include <sys/lock.h>
47 #include <sys/malloc.h>
48 #include <sys/mount.h>
49 #include <sys/unistd.h>
50 #include <sys/vnode.h>
51 #include <sys/poll.h>
52 
53 static int vop_nostrategy __P((struct vop_strategy_args *));
54 
55 /*
56  * This vnode table stores what we want to do if the filesystem doesn't
57  * implement a particular VOP.
58  *
59  * If there is no specific entry here, we will return EOPNOTSUPP.
60  *
61  */
62 
/*
 * default_vnodeop_p is filled in from this table by VNODEOP_SET below.
 * vop_default_desc maps every operation without an explicit entry here
 * to vop_eopnotsupp, matching the comment above.
 */
vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_abortop_desc,		(vop_t *) vop_null },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bwrite_desc,		(vop_t *) vop_stdbwrite },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_mmap_desc,		(vop_t *) vop_einval },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_reallocblks_desc,	(vop_t *) vop_eopnotsupp },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
        { &default_vnodeop_p, default_vnodeop_entries };

/* Register the default vector so default_vnodeop_p gets initialized. */
VNODEOP_SET(default_vnodeop_opv_desc);
91 
/*
 * Default vnode operation: the operation is not supported.
 */
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	 * Debug aid: ap->a_desc->vdesc_name identifies the operation
	 * that was rejected, should a printf be wanted here.
	 */
	return (EOPNOTSUPP);
}
101 
/*
 * Default vnode operation: bad file descriptor for this request.
 */
int
vop_ebadf(struct vop_generic_args *ap)
{
	return (EBADF);
}
108 
/*
 * Default vnode operation: inappropriate ioctl for this object.
 */
int
vop_enotty(struct vop_generic_args *ap)
{
	return (ENOTTY);
}
115 
/*
 * Default vnode operation: the request is invalid for this object.
 */
int
vop_einval(struct vop_generic_args *ap)
{
	return (EINVAL);
}
122 
/*
 * Default vnode operation: succeed without doing anything.
 */
int
vop_null(struct vop_generic_args *ap)
{
	return (0);
}
129 
/*
 * Dispatch an operation through the default vnode operation vector.
 * Filesystems point otherwise-unimplemented VOPs here so they pick up
 * the fallback behavior from default_vnodeop_entries above.
 */
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}
136 
/*
 * Filler for vnode operations that must never be invoked; panics on call.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("illegal vnode op called");
	/* NOTREACHED */
}
143 
144 /*
145  *	vop_nostrategy:
146  *
147  *	Strategy routine for VFS devices that have none.
148  *
149  *	B_ERROR and B_INVAL must be cleared prior to calling any strategy
150  *	routine.  Typically this is done for a B_READ strategy call.  Typically
151  *	B_INVAL is assumed to already be clear prior to a write and should not
152  *	be cleared manually unless you just made the buffer invalid.  B_ERROR
153  *	should be cleared either way.
154  */
155 
static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	/* Complain loudly, naming both the argument vnode and the
	 * vnode the buffer itself points at (they may differ). */
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	/* Fail the I/O: flag the error on the buffer and complete it. */
	ap->a_bp->b_flags |= B_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	biodone(ap->a_bp);
	return (EOPNOTSUPP);
}
167 
168 int
169 vop_stdpathconf(ap)
170 	struct vop_pathconf_args /* {
171 	struct vnode *a_vp;
172 	int a_name;
173 	int *a_retval;
174 	} */ *ap;
175 {
176 
177 	switch (ap->a_name) {
178 		case _PC_LINK_MAX:
179 			*ap->a_retval = LINK_MAX;
180 			return (0);
181 		case _PC_MAX_CANON:
182 			*ap->a_retval = MAX_CANON;
183 			return (0);
184 		case _PC_MAX_INPUT:
185 			*ap->a_retval = MAX_INPUT;
186 			return (0);
187 		case _PC_PIPE_BUF:
188 			*ap->a_retval = PIPE_BUF;
189 			return (0);
190 		case _PC_CHOWN_RESTRICTED:
191 			*ap->a_retval = 1;
192 			return (0);
193 		case _PC_VDISABLE:
194 			*ap->a_retval = _POSIX_VDISABLE;
195 			return (0);
196 		default:
197 			return (EINVAL);
198 	}
199 	/* NOTREACHED */
200 }
201 
202 /*
203  * Standard lock, unlock and islocked functions.
204  *
205  * These depend on the lock structure being the first element in the
 * inode, ie: vp->v_data points to the lock!
207  */
/*
 * Standard vnode lock.  Relies on the lockmgr lock being the first
 * member of the filesystem's per-vnode data, so v_data can be cast
 * directly to a struct lock (see the comment above).
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	/* No per-vnode data: nothing to lock, but still honor LK_INTERLOCK. */
	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return 0;
	}

#ifndef	DEBUG_LOCKS
	return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p));
#else
	/* DEBUG_LOCKS variant records the caller for lock diagnostics. */
	return (debuglockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p,
	    "vop_stdlock", ap->a_vp->filename, ap->a_vp->line));
#endif
}
231 
232 int
233 vop_stdunlock(ap)
234 	struct vop_unlock_args /* {
235 		struct vnode *a_vp;
236 		int a_flags;
237 		struct proc *a_p;
238 	} */ *ap;
239 {
240 	struct lock *l;
241 
242 	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
243 		if (ap->a_flags & LK_INTERLOCK)
244 			simple_unlock(&ap->a_vp->v_interlock);
245 		return 0;
246 	}
247 
248 	return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock,
249 	    ap->a_p));
250 }
251 
252 int
253 vop_stdislocked(ap)
254 	struct vop_islocked_args /* {
255 		struct vnode *a_vp;
256 	} */ *ap;
257 {
258 	struct lock *l;
259 
260 	if ((l = (struct lock *)ap->a_vp->v_data) == NULL)
261 		return 0;
262 
263 	return (lockstatus(l));
264 }
265 
266 /*
267  * Return true for select/poll.
268  */
269 int
270 vop_nopoll(ap)
271 	struct vop_poll_args /* {
272 		struct vnode *a_vp;
273 		int  a_events;
274 		struct ucred *a_cred;
275 		struct proc *a_p;
276 	} */ *ap;
277 {
278 	/*
279 	 * Return true for read/write.  If the user asked for something
280 	 * special, return POLLNVAL, so that clients have a way of
281 	 * determining reliably whether or not the extended
282 	 * functionality is present without hard-coding knowledge
283 	 * of specific filesystem implementations.
284 	 */
285 	if (ap->a_events & ~POLLSTANDARD)
286 		return (POLLNVAL);
287 
288 	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
289 }
290 
291 /*
292  * Implement poll for local filesystems that support it.
293  */
294 int
295 vop_stdpoll(ap)
296 	struct vop_poll_args /* {
297 		struct vnode *a_vp;
298 		int  a_events;
299 		struct ucred *a_cred;
300 		struct proc *a_p;
301 	} */ *ap;
302 {
303 	if ((ap->a_events & ~POLLSTANDARD) == 0)
304 		return (ap->a_events & (POLLRDNORM|POLLWRNORM));
305 	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
306 }
307 
308 int
309 vop_stdbwrite(ap)
310 	struct vop_bwrite_args *ap;
311 {
312 	return (bwrite(ap->a_bp));
313 }
314 
315 /*
316  * Stubs to use when there is no locking to be done on the underlying object.
317  * A minimal shared lock is necessary to ensure that the underlying object
318  * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
320  */
/*
 * Shared-lock stub: takes a minimal shared lock on an auxiliary
 * per-vnode lock (v_vnlock), allocated lazily on first use, so the
 * underlying object cannot be revoked mid-operation.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	/* First use: allocate and initialize the auxiliary lock. */
	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
		/*
		 * FALLTHROUGH (when DEBUG_VFS_LOCKS is not defined):
		 * exclusive requests are satisfied with a shared lock.
		 */
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		/* Lock mode transitions are no-ops on the shared stub. */
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else
	/* DEBUG_LOCKS variant records the caller for lock diagnostics. */
	return (debuglockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}
386 
387 /*
388  * Stubs to use when there is no locking to be done on the underlying object.
389  * A minimal shared lock is necessary to ensure that the underlying object
390  * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
392  */
/*
 * No-op lock stub.  The "notyet" branch would take a real shared lock
 * on the auxiliary v_vnlock; for now the stub only clears the
 * interlock (if asked) and succeeds.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	/* First use: allocate and initialize the auxiliary lock. */
	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		/* Exclusive requests are downgraded to shared locks. */
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		/* Lock mode transitions are no-ops on this stub. */
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}
455 
456 /*
457  * Do the inverse of vop_nolock, handling the interlock in a compatible way.
458  */
459 int
460 vop_nounlock(ap)
461 	struct vop_unlock_args /* {
462 		struct vnode *a_vp;
463 		int a_flags;
464 		struct proc *a_p;
465 	} */ *ap;
466 {
467 	struct vnode *vp = ap->a_vp;
468 
469 	if (vp->v_vnlock == NULL) {
470 		if (ap->a_flags & LK_INTERLOCK)
471 			simple_unlock(&ap->a_vp->v_interlock);
472 		return (0);
473 	}
474 	return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
475 		&ap->a_vp->v_interlock, ap->a_p));
476 }
477 
478 /*
479  * Return whether or not the node is in use.
480  */
481 int
482 vop_noislocked(ap)
483 	struct vop_islocked_args /* {
484 		struct vnode *a_vp;
485 	} */ *ap;
486 {
487 	struct vnode *vp = ap->a_vp;
488 
489 	if (vp->v_vnlock == NULL)
490 		return (0);
491 	return (lockstatus(vp->v_vnlock));
492 }
493 
494 /*
495  * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
497  */
498 int
499 vfs_stdmount (mp, path, data, ndp, p)
500 	struct mount *mp;
501 	char *path;
502 	caddr_t data;
503 	struct nameidata *ndp;
504 	struct proc *p;
505 {
506 	return (0);
507 }
508 
/*
 * Default unmount: nothing to tear down, succeed unconditionally.
 */
int
vfs_stdunmount(struct mount *mp, int mntflags, struct proc *p)
{
	return (0);
}
517 
/*
 * Default root lookup: no root vnode is available for this filesystem.
 */
int
vfs_stdroot(struct mount *mp, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}
525 
/*
 * Default statfs: filesystem statistics are not provided.
 */
int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp, struct proc *p)
{
	return (EOPNOTSUPP);
}
534 
/*
 * Default vptofh: NFS file handles are not supported.
 */
int
vfs_stdvptofh(struct vnode *vp, struct fid *fhp)
{
	return (EOPNOTSUPP);
}
542 
/*
 * Default start: no post-mount activation needed, succeed.
 */
int
vfs_stdstart(struct mount *mp, int flags, struct proc *p)
{
	return (0);
}
551 
552 int
553 vfs_stdquotactl (mp, cmds, uid, arg, p)
554 	struct mount *mp;
555 	int cmds;
556 	uid_t uid;
557 	caddr_t arg;
558 	struct proc *p;
559 {
560 	return (EOPNOTSUPP);
561 }
562 
/*
 * Default sync: nothing cached to flush, succeed.
 */
int
vfs_stdsync(struct mount *mp, int waitfor, struct ucred *cred,
    struct proc *p)
{
	return (0);
}
572 
/*
 * Default vget: lookup by inode number is not supported.
 */
int
vfs_stdvget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}
581 
/*
 * Default fhtovp: NFS file handles are not supported.
 */
int
vfs_stdfhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}
590 
/*
 * Default checkexp: this filesystem is never exported.
 */
int
vfs_stdcheckexp(struct mount *mp, struct sockaddr *nam, int *extflagsp,
    struct ucred **credanonp)
{
	return (EOPNOTSUPP);
}
600 
/*
 * Default init: no filesystem-wide setup to perform.
 */
int
vfs_stdinit(struct vfsconf *vfsp)
{
	return (0);
}
607 
/*
 * Default uninit: no filesystem-wide teardown to perform.
 */
int
vfs_stduninit(struct vfsconf *vfsp)
{
	return (0);
}
614 
615 /* end of vfs default ops */
616