xref: /freebsd/sys/kern/vfs_default.c (revision 2ad872c5794e4c26fdf6ed219ad3f09ca0d5304a)
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

static int vop_nostrategy __P((struct vop_strategy_args *));

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_abortop_desc,		(vop_t *) vop_null },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bwrite_desc,		(vop_t *) vop_stdbwrite },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_mmap_desc,		(vop_t *) vop_einval },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_reallocblks_desc,	(vop_t *) vop_eopnotsupp },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
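
/*
 * A minimal sketch (hypothetical "myfs", compiled out) of how a
 * filesystem hooks into this fallback mechanism: its own table handles
 * the operations it implements and routes everything else through
 * vop_defaultop() below, which redispatches into default_vnodeop_p.
 * The myfs_read/myfs_write handlers are assumptions for illustration.
 */
#if 0
static int	myfs_read __P((struct vop_read_args *));	/* assumed */
static int	myfs_write __P((struct vop_write_args *));	/* assumed */

static vop_t **myfs_vnodeop_p;
static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_defaultop },
	{ &vop_read_desc,		(vop_t *) myfs_read },
	{ &vop_write_desc,		(vop_t *) myfs_write },
	{ NULL, NULL }
};
static struct vnodeopv_desc myfs_vnodeop_opv_desc =
	{ &myfs_vnodeop_p, myfs_vnodeop_entries };

VNODEOP_SET(myfs_vnodeop_opv_desc);
#endif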

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

int
vop_panic(struct vop_generic_args *ap)
{

	panic("illegal vnode op called");
}

/*
 * Default strategy routine for vnodes whose filesystem provides none:
 * complain, fail the buffer with EOPNOTSUPP and complete it, so that
 * no caller is left waiting on I/O that will never happen.
 */
static int
vop_nostrategy(struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_flags |= B_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	biodone(ap->a_bp);
	return (EOPNOTSUPP);
}

int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
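
/*
 * Sketch (hypothetical "myfs", compiled out): a filesystem may point
 * its pathconf entry straight at vop_stdpathconf(), or wrap it to
 * override individual variables and fall back for the rest.  The name
 * limit chosen here is an assumption for illustration.
 */
#if 0
static int
myfs_pathconf(struct vop_pathconf_args *ap)
{

	if (ap->a_name == _PC_NAME_MAX) {
		*ap->a_retval = 255;		/* assumed myfs name limit */
		return (0);
	}
	return (vop_stdpathconf(ap));		/* defaults for the rest */
}
#endif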

/*
 * Standard lock, unlock and islocked functions.
 *
 * These depend on the lock structure being the first element in the
 * inode, i.e., vp->v_data points to the lock!  (See the layout sketch
 * following vop_stdislocked() below.)
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}

	return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p));
}

int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}

	return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock,
	    ap->a_p));
}

int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL)
		return (0);

	return (lockstatus(l));
}
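
/*
 * Layout sketch (hypothetical "myfs", compiled out) of the requirement
 * noted above: the struct lock must be the first member of whatever
 * v_data points at, so that the casts in vop_stdlock() and friends
 * find it.
 */
#if 0
struct myfs_inode {
	struct lock	i_lock;		/* must be first: found via v_data */
	struct vnode	*i_vnode;	/* back pointer to the vnode */
	/* ...filesystem-specific fields follow... */
};
#endif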

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	if ((ap->a_events & ~POLLSTANDARD) == 0)
		return (ap->a_events & (POLLRDNORM|POLLWRNORM));
	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
}
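
/*
 * Sketch (compiled out) of the other half of the vop_stdpoll()
 * protocol: after vn_pollrecord() has noted an interested poller, the
 * filesystem is expected to call vn_pollevent() when the awaited
 * condition occurs.  The helper and the event chosen here are
 * assumptions for illustration.
 */
#if 0
static void
myfs_extended_event(struct vnode *vp)
{

	vn_pollevent(vp, POLLEXTEND);	/* wake any recorded pollers */
}
#endif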

int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}
	return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
	    &ap->a_vp->v_interlock, ap->a_p));
}

/*
 * Return whether or not the node is in use.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL)
		return (0);
	return (lockstatus(vp->v_vnlock));
}