/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.1 (Berkeley) 6/10/93
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	$Id: lofs_vnops.c,v 1.11 1992/05/30 10:05:43 jsp Exp jsp $
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */

/*
 * Null Layer
 *
 * (See mount_null(8) for more information.)
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects:  it is implemented using
 * a stackable-layers technique, and its "null-nodes" stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it restores the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations,
 * vop_getattr, _inactive, _reclaim, and _print are not bypassed.
 * Vop_getattr must change the fsid being returned.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.
 * Vop_print is not bypassed to avoid excessive debugging
 * information.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
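 *
 * Schematically, the example above leaves two vnode stacks in place,
 * roughly as follows (the pathnames are those of the example and are
 * shown for illustration only):
 *
 *	/dev/layer/null      -> null-node  over  UFS vnode for /usr/include
 *	/dev/layer/null/sys  -> null-node  over  UFS vnode for /usr/include/sys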
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 *
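 * As a sketch of the second approach (the names below are illustrative
 * only; assume a getattr-style argument structure like the one used by
 * null_getattr further down in this file), a layer routine could map
 * its vnode by hand and invoke the lower operation directly:
 *
 *	struct vnode *lowervp = NULLVPTOLOWERVP(ap->a_vp);
 *	error = VOP_GETATTR(lowervp, ap->a_vap, ap->a_cred, ap->a_p);
 *
 * The same operation routed through null_bypass would instead have the
 * argument mapping (and any stacking over a returned vnode) done for it.
 *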
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/nullfs/null.h>


int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */

/*
 * This is the 10-Apr-92 bypass routine.
 *    This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *    Define SAFETY to include some error checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(ap)
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap;
{
	extern int (**null_vnodeop_p)();  /* not extern, really "forward" */
	register struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map.\n");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p)->v_op != null_vnodeop_p) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & 1)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & 1)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	}

 out:
	return (error);
}


/*
 *  We handle getattr only to change the fsid.
 */
int
null_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	int error;
	if (error = null_bypass(ap))
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}


int
null_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our null_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	return (0);
}

int
null_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct null_node *xp = VTONULL(vp);
	struct vnode *lowervp = xp->null_lowervp;

	/*
	 * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
	 * so we can't call VOPs on ourself.
	 */
	/* After this assignment, this node will not be re-used. */
	xp->null_lowervp = NULL;
	remque(xp);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele (lowervp);
	return (0);
}


int
null_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	printf ("\ttag VT_NULLFS, vp=%x, lowervp=%x\n", vp, NULLVPTOLOWERVP(vp));
	return (0);
}


/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
null_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_STRATEGY(bp);

	bp->b_vp = savedvp;

	return (error);
}


/*
 * XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
null_bwrite(ap)
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}

/*
 * Global vfs data structures
 */
int (**null_vnodeop_p)();
struct vnodeopv_entry_desc null_vnodeop_entries[] = {
	{ &vop_default_desc, null_bypass },

	{ &vop_getattr_desc, null_getattr },
	{ &vop_inactive_desc, null_inactive },
	{ &vop_reclaim_desc, null_reclaim },
	{ &vop_print_desc, null_print },

	{ &vop_strategy_desc, null_strategy },
	{ &vop_bwrite_desc, null_bwrite },

	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc null_vnodeop_opv_desc =
	{ &null_vnodeop_p, null_vnodeop_entries };
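
/*
 * A layer derived from the null layer (the prototype use described in
 * the comment at the top of this file) would build the corresponding
 * table by adding entries only for the operations it wants to intercept,
 * letting everything else fall through to its bypass routine via
 * vop_default_desc.  A hypothetical sketch, where the "xyzfs" names are
 * illustrative only and not part of this file:
 *
 *	int (**xyzfs_vnodeop_p)();
 *	struct vnodeopv_entry_desc xyzfs_vnodeop_entries[] = {
 *		{ &vop_default_desc, xyzfs_bypass },
 *		{ &vop_lookup_desc, xyzfs_lookup },
 *		{ &vop_getattr_desc, xyzfs_getattr },
 *		{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
 *	};
 *	struct vnodeopv_desc xyzfs_vnodeop_opv_desc =
 *		{ &xyzfs_vnodeop_p, xyzfs_vnodeop_entries };
 */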
463