/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$Id$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

static int vop_nostrategy __P((struct vop_strategy_args *));

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_abortop_desc,		(vop_t *) vop_null },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bwrite_desc,		(vop_t *) vop_stdbwrite },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_mmap_desc,		(vop_t *) vop_einval },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_reallocblks_desc,	(vop_t *) vop_eopnotsupp },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
        { &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
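
/*
 * Illustrative sketch (not part of this file): a filesystem that
 * implements only a few operations can route everything else through
 * vop_defaultop, which dispatches into the table above.  The names
 * myfs_vnodeop_p, myfs_vnodeop_entries and myfs_read are hypothetical:
 *
 *	vop_t **myfs_vnodeop_p;
 *	static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
 *		{ &vop_default_desc,	(vop_t *) vop_defaultop },
 *		{ &vop_read_desc,	(vop_t *) myfs_read },
 *		{ NULL, NULL }
 *	};
 *	static struct vnodeopv_desc myfs_vnodeop_opv_desc =
 *		{ &myfs_vnodeop_p, myfs_vnodeop_entries };
 *	VNODEOP_SET(myfs_vnodeop_opv_desc);
 */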

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

int
vop_panic(struct vop_generic_args *ap)
{

	panic("illegal vnode op called");
}
/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	B_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a B_READ strategy call.
 *	B_INVAL is usually assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  B_ERROR should be cleared either way.
 */

static int
vop_nostrategy(struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_flags |= B_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	biodone(ap->a_bp);
	return (EOPNOTSUPP);
}

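/*
 * Illustrative sketch of the caller-side protocol described above; the
 * surrounding read path is hypothetical:
 *
 *	bp->b_flags &= ~(B_ERROR | B_INVAL);
 *	bp->b_flags |= B_READ;
 *	VOP_STRATEGY(vp, bp);
 *	error = biowait(bp);
 */
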
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}
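
/*
 * Illustrative use (hypothetical caller): query a limit through the
 * vnode interface; names this stub does not handle fail with EINVAL:
 *
 *	int val, error;
 *
 *	error = VOP_PATHCONF(vp, _PC_PIPE_BUF, &val);
 */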

/*
 * Standard lock, unlock and islocked functions.
 *
 * These depend on the lock structure being the first element in the
 * inode, i.e.: vp->v_data points to the lock!
 */
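
/*
 * Illustrative layout (hypothetical myfs_inode): the stubs below cast
 * v_data to a struct lock, so the lock must be the first member and
 * vp->v_data must point at the inode:
 *
 *	struct myfs_inode {
 *		struct lock i_lock;	(must be first member)
 *		struct vnode *i_vnode;
 *	};
 */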
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}

#ifndef	DEBUG_LOCKS
	return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p,
	    "vop_stdlock", ap->a_vp->filename, ap->a_vp->line));
#endif
}

int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}

	return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock,
	    ap->a_p));
}

int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL)
		return (0);

	return (lockstatus(l));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
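
/*
 * Illustrative probe (hypothetical caller): extended poll support can
 * be detected by requesting a non-standard event and checking for
 * POLLNVAL, as described in the comment above:
 *
 *	revents = VOP_POLL(vp, POLLWRITE, p->p_ucred, p);
 *	if (revents & POLLNVAL)
 *		(no extended support; fall back to POLLSTANDARD events)
 */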

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	if ((ap->a_events & ~POLLSTANDARD) == 0)
		return (ap->a_events & (POLLRDNORM|POLLWRNORM));
	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
}

int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
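
/*
 * Illustrative sketch (hypothetical stacked filesystem): a layer with
 * no locking of its own can point v_vnlock at the lower vnode's lock
 * so that the active shared count is kept in one place:
 *
 *	vp->v_vnlock = lowervp->v_vnlock;
 */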
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc.) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc.) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}
	return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
		&ap->a_vp->v_interlock, ap->a_p));
}

/*
 * Return whether or not the node is locked.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL)
		return (0);
	return (lockstatus(vp->v_vnlock));
}

493