1 /*-
2  * Copyright (c) 1992, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * John Heidemann of the UCLA Ficus project.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
33  *
34  * Ancestors:
35  *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
36  *	...and...
37  *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
38  *
39  * $FreeBSD$
40  */
41 
42 /*
43  * Null Layer
44  *
45  * (See mount_nullfs(8) for more information.)
46  *
47  * The null layer duplicates a portion of the filesystem
48  * name space under a new name.  In this respect, it is
49  * similar to the loopback filesystem.  It differs from
50  * the loopback fs in two respects:  it is implemented using
51  * stackable layer techniques, and its null-nodes stack above
52  * all lower-layer vnodes, not just over directory vnodes.
53  *
54  * The null layer has two purposes.  First, it serves as a demonstration
55  * of layering by providing a layer which does nothing.  (It actually
56  * does everything the loopback filesystem does, which is slightly
57  * more than nothing.)  Second, the null layer can serve as a prototype
58  * layer.  Since it provides all necessary layer framework,
59  * new filesystem layers can be created very easily by starting
60  * with a null layer.
61  *
62  * The remainder of this comment examines the null layer as a basis
63  * for constructing new layers.
64  *
65  *
66  * INSTANTIATING NEW NULL LAYERS
67  *
68  * New null layers are created with mount_nullfs(8).
69  * Mount_nullfs(8) takes two arguments, the pathname
70  * of the lower vfs (target-pn) and the pathname where the null
71  * layer will appear in the namespace (alias-pn).  After
72  * the null layer is put into place, the contents
73  * of the target-pn subtree will be aliased under alias-pn.
74  *
75  *
76  * OPERATION OF A NULL LAYER
77  *
78  * The null layer is the minimum filesystem layer,
79  * simply bypassing all possible operations to the lower layer
80  * for processing there.  The majority of its activity centers
81  * on the bypass routine, through which nearly all vnode operations
82  * pass.
83  *
84  * The bypass routine accepts arbitrary vnode operations for
85  * handling by the lower layer.  It begins by examining vnode
86  * operation arguments and replacing any null-nodes by their
87  * lower-layer equivalents.  It then invokes the operation
88  * on the lower layer.  Finally, it restores the null-nodes
89  * in the arguments and, if a vnode is returned by the operation,
90  * stacks a null-node on top of the returned vnode.
91  *
92  * Although bypass handles most operations, vop_getattr, vop_lock,
93  * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
94  * bypassed. Vop_getattr must change the fsid being returned.
95  * Vop_lock and vop_unlock must handle any locking for the
96  * current vnode as well as pass the lock request down.
97  * Vop_inactive and vop_reclaim are not bypassed so that
98  * they can handle freeing null-layer specific data. Vop_print
99  * is not bypassed to avoid excessive debugging information.
100  * Also, certain vnode operations change the locking state within
101  * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
102  * and symlink). Ideally these operations should not change the
103  * lock state, but should be changed to let the caller of the
104  * function unlock them. Otherwise all intermediate vnode layers
105  * (such as union, umapfs, etc) must catch these functions to do
106  * the necessary locking at their layer.
107  *
108  *
109  * INSTANTIATING VNODE STACKS
110  *
111  * Mounting associates the null layer with a lower layer,
112  * in effect stacking two VFSes.  Vnode stacks are instead
113  * created on demand as files are accessed.
114  *
115  * The initial mount creates a single vnode stack for the
116  * root of the new null layer.  All other vnode stacks
117  * are created as a result of vnode operations on
118  * this or other null vnode stacks.
119  *
120  * New vnode stacks come into existence as a result of
121  * an operation which returns a vnode.
122  * The bypass routine stacks a null-node above the new
123  * vnode before returning it to the caller.
124  *
125  * For example, imagine mounting a null layer with
126  * "mount_nullfs /usr/include /dev/layer/null".
127  * Changing directory to /dev/layer/null will assign
128  * the root null-node (which was created when the null layer was mounted).
129  * Now consider opening "sys".  A vop_lookup would be
130  * done on the root null-node.  This operation would bypass through
131  * to the lower layer which would return a vnode representing
132  * the UFS "sys".  Null_bypass then builds a null-node
133  * aliasing the UFS "sys" and returns this to the caller.
134  * Later operations on the null-node "sys" will repeat this
135  * process when constructing other vnode stacks.
136  *
137  *
138  * CREATING OTHER FILE SYSTEM LAYERS
139  *
140  * One of the easiest ways to construct new filesystem layers is to make
141  * a copy of the null layer, rename all files and variables, and
142  * then begin modifying the copy.  Sed can be used to easily rename
143  * all variables.
144  *
145  * The umap layer is an example of a layer descended from the
146  * null layer.
147  *
148  *
149  * INVOKING OPERATIONS ON LOWER LAYERS
150  *
151  * There are two techniques to invoke operations on a lower layer
152  * when the operation cannot be completely bypassed.  Each method
153  * is appropriate in different situations.  In both cases,
154  * it is the responsibility of the aliasing layer to make
155  * the operation arguments "correct" for the lower layer
156  * by mapping vnode arguments to the lower layer.
157  *
158  * The first approach is to call the aliasing layer's bypass routine.
159  * This method is most suitable when you wish to invoke the operation
160  * currently being handled on the lower layer.  It has the advantage
161  * that the bypass routine already must do argument mapping.
162  * An example of this is null_getattr in the null layer.
163  *
164  * A second approach is to directly invoke vnode operations on
165  * the lower layer with the VOP_OPERATIONNAME interface.
166  * The advantage of this method is that it is easy to invoke
167  * arbitrary operations on the lower layer.  The disadvantage
168  * is that vnode arguments must be manually mapped.
169  *
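 *
 * As an illustrative sketch (the "example_" names are hypothetical;
 * the bodies mirror null_getattr and null_vptofh below), the two
 * techniques look roughly like this:
 *
 *	Technique 1 -- reuse the bypass, then fix up the result:
 *
 *	static int
 *	example_getattr(struct vop_getattr_args *ap)
 *	{
 *		int error;
 *
 *		error = null_bypass((struct vop_generic_args *)ap);
 *		if (error != 0)
 *			return (error);
 *		ap->a_vap->va_fsid =
 *		    ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
 *		return (0);
 *	}
 *
 *	Technique 2 -- map the vnode argument by hand and invoke the
 *	lower layer's operation directly:
 *
 *	static int
 *	example_vptofh(struct vop_vptofh_args *ap)
 *	{
 *
 *		return (VOP_VPTOFH(NULLVPTOLOWERVP(ap->a_vp), ap->a_fhp));
 *	}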
170  */
171 
172 #include <sys/param.h>
173 #include <sys/systm.h>
174 #include <sys/conf.h>
175 #include <sys/kernel.h>
176 #include <sys/lock.h>
177 #include <sys/malloc.h>
178 #include <sys/mount.h>
179 #include <sys/mutex.h>
180 #include <sys/namei.h>
181 #include <sys/sysctl.h>
182 #include <sys/vnode.h>
183 
184 #include <fs/nullfs/null.h>
185 
186 #include <vm/vm.h>
187 #include <vm/vm_extern.h>
188 #include <vm/vm_object.h>
189 #include <vm/vnode_pager.h>
190 
191 static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
192 SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
193 	&null_bug_bypass, 0, "");
194 
195 /*
196  * This is the 10-Apr-92 bypass routine.
197  *    This version has been optimized for speed, throwing away some
198  * safety checks.  It should still always work, but it's not as
199  * robust to programmer errors.
200  *
201  * In general, we map all vnodes going down and unmap them on the way back.
202  * As an exception to this, vnodes can be marked "unmapped" by setting
203  * the Nth bit in the operation's vdesc_flags.
204  *
205  * Also, some BSD vnode operations have the side effect of vrele'ing
206  * their arguments.  With stacking, the reference counts are held
207  * by the upper node, not the lower one, so we must handle these
208  * side-effects here.  This is not of concern in Sun-derived systems
209  * since there are no such side-effects.
210  *
211  * This makes the following assumptions:
212  * - only one returned vpp
213  * - no INOUT vpp's (Sun's vop_open has one of these)
214  * - the vnode operation vector of the first vnode should be used
215  *   to determine what implementation of the op should be invoked
216  * - all mapped vnodes are of our vnode-type (NEEDSWORK:
217  *   problems on rmdir'ing mount points and renaming?)
218  */
219 int
220 null_bypass(struct vop_generic_args *ap)
221 {
222 	struct vnode **this_vp_p;
223 	int error;
224 	struct vnode *old_vps[VDESC_MAX_VPS];
225 	struct vnode **vps_p[VDESC_MAX_VPS];
226 	struct vnode ***vppp;
227 	struct vnodeop_desc *descp = ap->a_desc;
228 	int reles, i;
229 
230 	if (null_bug_bypass)
231 		printf ("null_bypass: %s\n", descp->vdesc_name);
232 
233 #ifdef DIAGNOSTIC
234 	/*
235 	 * We require at least one vp.
236 	 */
237 	if (descp->vdesc_vp_offsets == NULL ||
238 	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
239 		panic ("null_bypass: no vp's in map");
240 #endif
241 
242 	/*
243 	 * Map the vnodes going in.
244 	 * Later, we'll invoke the operation based on
245 	 * the first mapped vnode's operation vector.
246 	 */
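	/*
	 * vdesc_flags carries one VDESC_VPn_WILLRELE bit per vp slot;
	 * shifting "reles" right once per iteration keeps the current
	 * slot's bit lined up with VDESC_VP0_WILLRELE.
	 */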
247 	reles = descp->vdesc_flags;
248 	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
249 		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
250 			break;   /* bail out at end of list */
251 		vps_p[i] = this_vp_p =
252 			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
253 		/*
254 		 * We're not guaranteed that any vnodes but the first
255 		 * are of our type.  Check for and don't map any
256 		 * that aren't.  (We must always map the first vp or vclean fails.)
257 		 */
258 		if (i && (*this_vp_p == NULLVP ||
259 		    (*this_vp_p)->v_op != &null_vnodeops)) {
260 			old_vps[i] = NULLVP;
261 		} else {
262 			old_vps[i] = *this_vp_p;
263 			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
264 			/*
265 			 * XXX - Several operations have the side effect
266 			 * of vrele'ing their vp's.  We must account for
267 			 * that.  (This should go away in the future.)
268 			 */
269 			if (reles & VDESC_VP0_WILLRELE)
270 				VREF(*this_vp_p);
271 		}
272 
273 	}
274 
275 	/*
276 	 * Call the operation on the lower layer
277 	 * with the modified argument structure.
278 	 */
279 	if (vps_p[0] && *vps_p[0])
280 		error = VCALL(ap);
281 	else {
282 		printf("null_bypass: no map for %s\n", descp->vdesc_name);
283 		error = EINVAL;
284 	}
285 
286 	/*
287 	 * Maintain the illusion of call-by-value
288 	 * by restoring vnodes in the argument structure
289 	 * to their original value.
290 	 */
291 	reles = descp->vdesc_flags;
292 	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
293 		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
294 			break;   /* bail out at end of list */
295 		if (old_vps[i]) {
296 			*(vps_p[i]) = old_vps[i];
297 #if 0
298 			if (reles & VDESC_VP0_WILLUNLOCK)
299 				VOP_UNLOCK(*(vps_p[i]), 0);
300 #endif
301 			if (reles & VDESC_VP0_WILLRELE)
302 				vrele(*(vps_p[i]));
303 		}
304 	}
305 
306 	/*
307 	 * Map the possible out-going vpp
308 	 * (Assumes that the lower layer always returns
309 	 * a VREF'ed vpp unless it gets an error.)
310 	 */
311 	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
312 	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
313 	    !error) {
314 		/*
315 		 * XXX - even though some ops return vp's via vpp,
316 		 * several ops actually vrele this before returning.
317 		 * We must avoid these ops.
318 		 * (This should go away when these ops are regularized.)
319 		 */
320 		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
321 			goto out;
322 		vppp = VOPARG_OFFSETTO(struct vnode***,
323 				 descp->vdesc_vpp_offset,ap);
324 		if (*vppp)
325 			error = null_nodeget(old_vps[0]->v_mount, **vppp, *vppp);
326 	}
327 
328  out:
329 	return (error);
330 }
331 
332 /*
333  * We have to carry on the locking protocol on the null layer vnodes
334  * as we progress through the tree. We also have to enforce read-only
335  * if this layer is mounted read-only.
336  */
337 static int
338 null_lookup(struct vop_lookup_args *ap)
339 {
340 	struct componentname *cnp = ap->a_cnp;
341 	struct vnode *dvp = ap->a_dvp;
342 	int flags = cnp->cn_flags;
343 	struct vnode *vp, *ldvp, *lvp;
344 	int error;
345 
346 	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
347 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
348 		return (EROFS);
349 	/*
350 	 * Although it is possible to call null_bypass(), we'll do
351 	 * a direct call to reduce overhead
352 	 */
353 	ldvp = NULLVPTOLOWERVP(dvp);
354 	vp = lvp = NULL;
355 	error = VOP_LOOKUP(ldvp, &lvp, cnp);
356 	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
357 	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
358 	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
359 		error = EROFS;
360 
361 	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
362 		if (ldvp == lvp) {
363 			*ap->a_vpp = dvp;
364 			VREF(dvp);
365 			vrele(lvp);
366 		} else {
367 			error = null_nodeget(dvp->v_mount, lvp, &vp);
368 			if (error) {
369 				/* XXX Cleanup needed... */
370 				panic("null_nodeget failed");
371 			}
372 			*ap->a_vpp = vp;
373 		}
374 	}
375 	return (error);
376 }
377 
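/*
 * Open is simply bypassed to the lower layer; on success we also share
 * the lower vnode's VM object so that mappings and page cache access
 * through the upper vnode use the lower vnode's pages.
 */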
378 static int
379 null_open(struct vop_open_args *ap)
380 {
381 	int retval;
382 	struct vnode *vp, *ldvp;
383 
384 	vp = ap->a_vp;
385 	ldvp = NULLVPTOLOWERVP(vp);
386 	retval = null_bypass(&ap->a_gen);
387 	if (retval == 0)
388 		vp->v_object = ldvp->v_object;
389 	return (retval);
390 }
391 
392 /*
393  * Setattr call. Disallow write attempts if the layer is mounted read-only.
394  */
395 static int
396 null_setattr(struct vop_setattr_args *ap)
397 {
398 	struct vnode *vp = ap->a_vp;
399 	struct vattr *vap = ap->a_vap;
400 
401   	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
402 	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
403 	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
404 	    (vp->v_mount->mnt_flag & MNT_RDONLY))
405 		return (EROFS);
406 	if (vap->va_size != VNOVAL) {
407  		switch (vp->v_type) {
408  		case VDIR:
409  			return (EISDIR);
410  		case VCHR:
411  		case VBLK:
412  		case VSOCK:
413  		case VFIFO:
414 			if (vap->va_flags != VNOVAL)
415 				return (EOPNOTSUPP);
416 			return (0);
417 		case VREG:
418 		case VLNK:
419  		default:
420 			/*
421 			 * Disallow write attempts if the filesystem is
422 			 * mounted read-only.
423 			 */
424 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
425 				return (EROFS);
426 		}
427 	}
428 
429 	return (null_bypass((struct vop_generic_args *)ap));
430 }
431 
432 /*
433  *  We handle getattr only to change the fsid.
434  */
435 static int
436 null_getattr(struct vop_getattr_args *ap)
437 {
438 	int error;
439 
440 	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
441 		return (error);
442 
443 	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
444 	return (0);
445 }
446 
447 /*
448  * Handle access checks; disallow write access if mounted read-only.
449  */
450 static int
451 null_access(struct vop_access_args *ap)
452 {
453 	struct vnode *vp = ap->a_vp;
454 	mode_t mode = ap->a_mode;
455 
456 	/*
457 	 * Disallow write attempts on read-only layers unless the file
458 	 * is a socket, fifo, or a block or character device resident
459 	 * on the filesystem.
460 	 */
461 	if (mode & VWRITE) {
462 		switch (vp->v_type) {
463 		case VDIR:
464 		case VLNK:
465 		case VREG:
466 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
467 				return (EROFS);
468 			break;
469 		default:
470 			break;
471 		}
472 	}
473 	return (null_bypass((struct vop_generic_args *)ap));
474 }
475 
476 /*
477  * We handle this to prevent moving files from the null FS to the
478  * lower FS.  It is not clear why this is disallowed; possibly it
479  * should be permitted.
480  */
481 static int
482 null_rename(struct vop_rename_args *ap)
483 {
484 	struct vnode *tdvp = ap->a_tdvp;
485 	struct vnode *fvp = ap->a_fvp;
486 	struct vnode *fdvp = ap->a_fdvp;
487 	struct vnode *tvp = ap->a_tvp;
488 
489 	/* Check for cross-device rename. */
490 	if ((fvp->v_mount != tdvp->v_mount) ||
491 	    (tvp && (fvp->v_mount != tvp->v_mount))) {
492 		if (tdvp == tvp)
493 			vrele(tdvp);
494 		else
495 			vput(tdvp);
496 		if (tvp)
497 			vput(tvp);
498 		vrele(fdvp);
499 		vrele(fvp);
500 		return (EXDEV);
501 	}
502 
503 	return (null_bypass((struct vop_generic_args *)ap));
504 }
505 
506 /*
507  * We need to process our own vnode lock and then clear the
508  * interlock flag as it applies only to our vnode, not the
509  * vnodes below us on the stack.
510  */
511 static int
512 null_lock(struct vop_lock1_args *ap)
513 {
514 	struct vnode *vp = ap->a_vp;
515 	int flags = ap->a_flags;
516 	struct null_node *nn;
517 	struct vnode *lvp;
518 	int error;
519 
520 
521 	if ((flags & LK_INTERLOCK) == 0) {
522 		VI_LOCK(vp);
523 		ap->a_flags = flags |= LK_INTERLOCK;
524 	}
525 	nn = VTONULL(vp);
526 	/*
527 	 * If we're still active, we must ask the lower layer to
528 	 * lock, as ffs has special lock considerations in its
529 	 * vop lock.
530 	 */
531 	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
532 		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
533 		VI_UNLOCK(vp);
534 		/*
535 		 * We have to hold the vnode here to solve a potential
536 		 * reclaim race.  If we're forcibly vgone'd while we
537 		 * still have refs, a thread could be sleeping inside
538 		 * the lowervp's vop_lock routine.  When we are vgone'd we will
539 		 * drop our last ref to the lowervp, which would allow it
540 		 * to be reclaimed.  The lowervp could then be recycled,
541 		 * in which case it is not legal to be sleeping in its VOP.
542 		 * We prevent it from being recycled by holding the vnode
543 		 * here.
544 		 */
545 		vholdl(lvp);
546 		error = VOP_LOCK(lvp, flags);
547 
548 		/*
549 		 * We might have slept to get the lock and someone might have
550 		 * cleaned our vnode already, switching the vnode lock from the one
551 		 * in lowervp to v_lock in our own vnode structure.  Handle this
552 		 * case by reacquiring the correct lock in the requested mode.
553 		 */
554 		if (VTONULL(vp) == NULL && error == 0) {
555 			ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
556 			switch (flags & LK_TYPE_MASK) {
557 			case LK_SHARED:
558 				ap->a_flags |= LK_SHARED;
559 				break;
560 			case LK_UPGRADE:
561 			case LK_EXCLUSIVE:
562 				ap->a_flags |= LK_EXCLUSIVE;
563 				break;
564 			default:
565 				panic("Unsupported lock request %d\n",
566 				    ap->a_flags);
567 			}
568 			VOP_UNLOCK(lvp, 0);
569 			error = vop_stdlock(ap);
570 		}
571 		vdrop(lvp);
572 	} else
573 		error = vop_stdlock(ap);
574 
575 	return (error);
576 }
577 
578 /*
579  * We need to process our own vnode unlock and then clear the
580  * interlock flag as it applies only to our vnode, not the
581  * vnodes below us on the stack.
582  */
583 static int
584 null_unlock(struct vop_unlock_args *ap)
585 {
586 	struct vnode *vp = ap->a_vp;
587 	int flags = ap->a_flags;
588 	int mtxlkflag = 0;
589 	struct null_node *nn;
590 	struct vnode *lvp;
591 	int error;
592 
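	/*
	 * mtxlkflag records how we came by the vnode interlock: 1 if the
	 * caller passed it in via LK_INTERLOCK, 2 if we acquired it here
	 * ourselves, 0 if this thread already owned it.
	 */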
593 	if ((flags & LK_INTERLOCK) != 0)
594 		mtxlkflag = 1;
595 	else if (mtx_owned(VI_MTX(vp)) == 0) {
596 		VI_LOCK(vp);
597 		mtxlkflag = 2;
598 	}
599 	nn = VTONULL(vp);
600 	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
601 		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
602 		flags |= LK_INTERLOCK;
603 		vholdl(lvp);
604 		VI_UNLOCK(vp);
605 		error = VOP_UNLOCK(lvp, flags);
606 		vdrop(lvp);
607 		if (mtxlkflag == 0)
608 			VI_LOCK(vp);
609 	} else {
610 		if (mtxlkflag == 2)
611 			VI_UNLOCK(vp);
612 		error = vop_stdunlock(ap);
613 	}
614 
615 	return (error);
616 }
617 
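/*
 * The vnode lock is normally shared with the lower vnode (v_vnlock
 * points at the lower vnode's lock while the null node is active), so
 * reporting its status describes the whole stack.
 */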
618 static int
619 null_islocked(struct vop_islocked_args *ap)
620 {
621 	struct vnode *vp = ap->a_vp;
622 
623 	return (lockstatus(vp->v_vnlock));
624 }
625 
626 /*
627  * There is no way to tell that someone issued a remove/rmdir operation
628  * on the underlying filesystem, so for now we just release lowervp as
629  * soon as possible.
630  *
631  * Note that we can't release any resources nor remove the vnode from the
632  * hash before the appropriate VXLOCK handling is done: another process can
633  * find this vnode in the hash during inactivation and sit in vget() waiting
634  * for null_inactive to unlock it, so all of that is done in VOP_RECLAIM.
635  */
636 static int
637 null_inactive(struct vop_inactive_args *ap)
638 {
639 	struct vnode *vp = ap->a_vp;
640 	struct thread *td = ap->a_td;
641 
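	/*
	 * Clear the VM object pointer that null_open() borrowed from the
	 * lower vnode; the object itself belongs to the lower layer.
	 */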
642 	vp->v_object = NULL;
643 
644 	/*
645 	 * If this is the last reference, then free up the vnode
646 	 * so as not to tie up the lower vnodes.
647 	 */
648 	vrecycle(vp, td);
649 
650 	return (0);
651 }
652 
653 /*
654  * Now, the VXLOCK is in force and we're free to destroy the null vnode.
655  */
656 static int
657 null_reclaim(struct vop_reclaim_args *ap)
658 {
659 	struct vnode *vp = ap->a_vp;
660 	struct null_node *xp = VTONULL(vp);
661 	struct vnode *lowervp = xp->null_lowervp;
662 	struct lock *vnlock;
663 
664 	if (lowervp)
665 		null_hashrem(xp);
666 	/*
667 	 * Use the interlock to protect the clearing of v_data to
668 	 * prevent faults in null_lock().
669 	 */
670 	VI_LOCK(vp);
671 	vp->v_data = NULL;
672 	vp->v_object = NULL;
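	/*
	 * Detach from the shared lower lock and fall back to the vnode's
	 * private v_lock before the lower vnode is released.
	 */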
673 	vnlock = vp->v_vnlock;
674 	vp->v_vnlock = &vp->v_lock;
675 	if (lowervp) {
676 		lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_INTERLOCK, VI_MTX(vp));
677 		vput(lowervp);
678 	} else
679 		panic("null_reclaim: reclaiming a node with no lowervp");
680 	FREE(xp, M_NULLFSNODE);
681 
682 	return (0);
683 }
684 
685 static int
686 null_print(struct vop_print_args *ap)
687 {
688 	struct vnode *vp = ap->a_vp;
689 
690 	printf("\tvp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
691 	return (0);
692 }
693 
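/*
 * Report the mount point that writes through this vnode will actually
 * modify by asking the lower vnode; hold it across the call so it
 * cannot be recycled underneath us.
 */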
694 /* ARGSUSED */
695 static int
696 null_getwritemount(struct vop_getwritemount_args *ap)
697 {
698 	struct null_node *xp;
699 	struct vnode *lowervp;
700 	struct vnode *vp;
701 
702 	vp = ap->a_vp;
703 	VI_LOCK(vp);
704 	xp = VTONULL(vp);
705 	if (xp && (lowervp = xp->null_lowervp)) {
706 		VI_LOCK_FLAGS(lowervp, MTX_DUPOK);
707 		VI_UNLOCK(vp);
708 		vholdl(lowervp);
709 		VI_UNLOCK(lowervp);
710 		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
711 		vdrop(lowervp);
712 	} else {
713 		VI_UNLOCK(vp);
714 		*(ap->a_mpp) = NULL;
715 	}
716 	return (0);
717 }
718 
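/*
 * Produce a file handle for the lower vnode, so that file handles
 * handed out for a nullfs mount (e.g. by NFS) identify the underlying
 * file.
 */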
719 static int
720 null_vptofh(struct vop_vptofh_args *ap)
721 {
722 	struct vnode *lvp;
723 
724 	lvp = NULLVPTOLOWERVP(ap->a_vp);
725 	return VOP_VPTOFH(lvp, ap->a_fhp);
726 }
727 
728 /*
729  * Global vfs data structures
730  */
731 struct vop_vector null_vnodeops = {
732 	.vop_bypass =		null_bypass,
733 	.vop_access =		null_access,
734 	.vop_bmap =		VOP_EOPNOTSUPP,
735 	.vop_getattr =		null_getattr,
736 	.vop_getwritemount =	null_getwritemount,
737 	.vop_inactive =		null_inactive,
738 	.vop_islocked =		null_islocked,
739 	.vop_lock1 =		null_lock,
740 	.vop_lookup =		null_lookup,
741 	.vop_open =		null_open,
742 	.vop_print =		null_print,
743 	.vop_reclaim =		null_reclaim,
744 	.vop_rename =		null_rename,
745 	.vop_setattr =		null_setattr,
746 	.vop_strategy =		VOP_EOPNOTSUPP,
747 	.vop_unlock =		null_unlock,
748 	.vop_vptofh =		null_vptofh,
749 };
750