/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */

/*
 * Null Layer
 *
 * (See mount_nullfs(8) for more information.)
 *
 * The null layer duplicates a portion of the filesystem
 * name space under a new name.  In this respect, it is
 * similar to the loopback filesystem.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable-layer techniques, and its "null-node"s stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback filesystem does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new filesystem layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_nullfs(8).
 * Mount_nullfs(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
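 * For example (the paths here are purely illustrative):
 *
 *	mount_nullfs /usr/src /mnt/src
 *
 * makes the /usr/src subtree (target-pn) also appear under
 * /mnt/src (alias-pn).
 *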
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum filesystem layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc.) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_nullfs /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new filesystem layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
150  *
151  * There are two techniques to invoke operations on a lower layer
152  * when the operation cannot be completely bypassed.  Each method
153  * is appropriate in different situations.  In both cases,
154  * it is the responsibility of the aliasing layer to make
155  * the operation arguments "correct" for the lower layer
156  * by mapping a vnode arguments to the lower layer.
157  *
158  * The first approach is to call the aliasing layer's bypass routine.
159  * This method is most suitable when you wish to invoke the operation
160  * currently being handled on the lower layer.  It has the advantage
161  * that the bypass routine already must do argument mapping.
162  * An example of this is null_getattrs in the null layer.
163  *
164  * A second approach is to directly invoke vnode operations on
165  * the lower layer with the VOP_OPERATIONNAME interface.
166  * The advantage of this method is that it is easy to invoke
167  * arbitrary operations on the lower layer.  The disadvantage
168  * is that vnode arguments must be manualy mapped.
169  *
170  */
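
/*
 * An illustrative sketch of the two techniques just described, for a
 * hypothetical layer derived from nullfs.  The example_layer_* names
 * are assumptions made up for this sketch; the block is not compiled.
 */
#if 0
/*
 * Technique 1: reuse the bypass routine, which maps all vnode
 * arguments to the lower layer and restores them afterwards.
 */
static int
example_layer_setattr(struct vop_setattr_args *ap)
{

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * Technique 2: map the vnode argument by hand and invoke the lower
 * layer directly through the VOP_OPERATIONNAME interface.
 */
static int
example_layer_getattr(struct vop_getattr_args *ap)
{
	struct vnode *lvp;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	return (VOP_GETATTR(lvp, ap->a_vap, ap->a_cred));
}
#endif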

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/stat.h>

#include <fs/nullfs/null.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");
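
/*
 * Usage note: the knob above is a read-write sysctl; setting it to a
 * non-zero value (e.g. "sysctl debug.nullfs_bug_bypass=1") makes
 * null_bypass() below print the name of every operation it forwards.
 */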
195 
196 /*
197  * This is the 10-Apr-92 bypass routine.
198  *    This version has been optimized for speed, throwing away some
199  * safety checks.  It should still always work, but it's not as
200  * robust to programmer errors.
201  *
202  * In general, we map all vnodes going down and unmap them on the way back.
203  * As an exception to this, vnodes can be marked "unmapped" by setting
204  * the Nth bit in operation's vdesc_flags.
205  *
206  * Also, some BSD vnode operations have the side effect of vrele'ing
207  * their arguments.  With stacking, the reference counts are held
208  * by the upper node, not the lower one, so we must handle these
209  * side-effects here.  This is not of concern in Sun-derived systems
210  * since there are no such side-effects.
211  *
212  * This makes the following assumptions:
213  * - only one returned vpp
214  * - no INOUT vpp's (Sun's vop_open has one of these)
215  * - the vnode operation vector of the first vnode should be used
216  *   to determine what implementation of the op should be invoked
217  * - all mapped vnodes are of our vnode-type (NEEDSWORK:
218  *   problems on rmdir'ing mount points and renaming?)
219  */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnode *lvp;
	struct vnodeop_desc *descp = ap->a_desc;
	int error, i, reles;

	if (null_bug_bypass)
		printf("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p = VOPARG_OFFSETTO(struct vnode **,
		    descp->vdesc_vp_offsets[i], ap);

		/*
		 * We're not guaranteed that any vnodes but the first
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map the first vp or
		 * vclean fails.)
		 */
		if (i != 0 && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != &null_vnodeops)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);

			/*
			 * The upper vnode reference to the lower
			 * vnode is the only reference that keeps our
			 * pointer to the lower vnode alive.  If the lower
			 * vnode is relocked during the VOP call,
			 * the upper vnode might become unlocked and
			 * reclaimed, which invalidates our reference.
			 * Add a transient hold around the VOP call.
			 */
			vhold(*this_vp_p);

			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				vref(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	if (vps_p[0] != NULL && *vps_p[0] != NULL) {
		error = VCALL(ap);
	} else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i] != NULL) {
			lvp = *(vps_p[i]);

			/*
			 * Get rid of the transient hold on lvp.
			 * If lowervp was unlocked during the VOP
			 * operation, the nullfs upper vnode could have
			 * been reclaimed, which changes its v_vnlock
			 * back to the private v_lock.  In this case we
			 * must move lock ownership from the lower to
			 * the upper (reclaimed) vnode.
			 */
			if (lvp != NULLVP) {
				if (VOP_ISLOCKED(lvp) == LK_EXCLUSIVE &&
				    old_vps[i]->v_vnlock != lvp->v_vnlock) {
					VOP_UNLOCK(lvp);
					VOP_LOCK(old_vps[i], LK_EXCLUSIVE |
					    LK_RETRY);
				}
				vdrop(lvp);
			}

			*(vps_p[i]) = old_vps[i];
#if 0
			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]), 0);
#endif
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && error == 0) {
		/*
		 * XXX - even though some ops return vp's via vpp,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		vppp = VOPARG_OFFSETTO(struct vnode ***,
		    descp->vdesc_vpp_offset, ap);
		if (*vppp != NULL)
			error = null_nodeget(old_vps[0]->v_mount, **vppp,
			    *vppp);
	}

	return (error);
}

static int
null_add_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *lvp, *vp;
	int error;

	vp = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);
	VI_LOCK(vp);
	/* text refs are bypassed to lowervp */
	VNASSERT(vp->v_writecount >= 0, vp, ("wrong null writecount"));
	VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
	    ("wrong writecount inc %d", ap->a_inc));
	error = VOP_ADD_WRITECOUNT(lvp, ap->a_inc);
	if (error == 0)
		vp->v_writecount += ap->a_inc;
	VI_UNLOCK(vp);
	return (error);
}

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	int flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	struct mount *mp;
	int error;

	mp = dvp->v_mount;
	if ((flags & ISLASTCN) != 0 && (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	/*
	 * Although it is possible to call null_bypass(), we make
	 * a direct call to reduce overhead.
	 */
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;

	/*
	 * Renames in the lower mounts might create an inconsistent
	 * configuration where the lower vnode is moved out of the
	 * directory tree remounted by our null mount.  Do not try to
	 * handle this in any fancy way; just avoid a VOP_LOOKUP() with
	 * the DOTDOT name, which cannot be handled by the VOP, at least
	 * when passing over the lower root.
	 */
	if ((ldvp->v_vflag & VV_ROOT) != 0 && (flags & ISDOTDOT) != 0) {
		KASSERT((dvp->v_vflag & VV_ROOT) == 0,
		    ("ldvp %p fl %#x dvp %p fl %#x flags %#x",
		    ldvp, ldvp->v_vflag, dvp, dvp->v_vflag, flags));
		return (ENOENT);
	}

	/*
	 * Hold ldvp.  The reference on it, owned by dvp, is lost in
	 * case of dvp reclamation, and we need ldvp to move our lock
	 * from ldvp to dvp.
	 */
	vhold(ldvp);

	error = VOP_LOOKUP(ldvp, &lvp, cnp);

	/*
	 * VOP_LOOKUP() on the lower vnode may unlock ldvp, which allows
	 * dvp to be reclaimed due to the shared v_vnlock.  Check for the
	 * doomed state and return an error.
	 */
	if (VN_IS_DOOMED(dvp)) {
		if (error == 0 || error == EJUSTRETURN) {
			if (lvp != NULL)
				vput(lvp);
			error = ENOENT;
		}

		/*
		 * If vgone() reclaimed dvp before curthread
		 * relocked ldvp, the locks of dvp and ldvp are no
		 * longer shared.  In this case, the relock of ldvp in
		 * the lower fs VOP_LOOKUP() does not restore the locking
		 * state of dvp.  Compensate for this by unlocking
		 * ldvp and locking dvp, which is also correct if the
		 * locks are still shared.
		 */
		VOP_UNLOCK(ldvp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	}
	vdrop(ldvp);

	if (error == EJUSTRETURN && (flags & ISLASTCN) != 0 &&
	    (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = null_nodeget(mp, lvp, &vp);
			if (error == 0)
				*ap->a_vpp = vp;
		}
	}
	return (error);
}

static int
null_open(struct vop_open_args *ap)
{
	int retval;
	struct vnode *vp, *ldvp;

	vp = ap->a_vp;
	ldvp = NULLVPTOLOWERVP(vp);
	retval = null_bypass(&ap->a_gen);
	if (retval == 0) {
		vp->v_object = ldvp->v_object;
		if ((vn_irflag_read(ldvp) & VIRF_PGREAD) != 0) {
			MPASS(vp->v_object != NULL);
			if ((vn_irflag_read(vp) & VIRF_PGREAD) == 0) {
				vn_irflag_set_cond(vp, VIRF_PGREAD);
			}
		}
	}
	return (retval);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle stat and getattr only to change the fsid.
 */
static int
null_stat(struct vop_stat_args *ap)
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_sb->st_dev = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

static int
null_getattr(struct vop_getattr_args *ap)
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * Handled here to disallow write access if the layer is mounted read-only.
 */
static int
null_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

static int
null_accessx(struct vop_accessx_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * Increasing the refcount of the lower vnode is needed at least for
 * the case when the lower FS is NFS, in order to do a sillyrename if
 * the file is in use.  Unfortunately v_usecount is incremented in
 * many places in the kernel and, as such, there may be races that
 * result in the NFS client doing an extraneous silly rename, but
 * that seems preferable to not doing a silly rename when it is needed.
 */
static int
null_remove(struct vop_remove_args *ap)
{
	int retval, vreleit;
	struct vnode *lvp, *vp;

	vp = ap->a_vp;
	if (vrefcnt(vp) > 1) {
		lvp = NULLVPTOLOWERVP(vp);
		VREF(lvp);
		vreleit = 1;
	} else
		vreleit = 0;
	VTONULL(vp)->null_flags |= NULLV_DROP;
	retval = null_bypass(&ap->a_gen);
	if (vreleit != 0)
		vrele(lvp);
	return (retval);
}

/*
 * We handle this to prevent moving files from the null FS to the
 * lower FS.  It is not clear why this is disallowed; possibly it
 * should be permitted.
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *fdvp, *fvp, *tdvp, *tvp;
	struct vnode *lfdvp, *lfvp, *ltdvp, *ltvp;
	struct null_node *fdnn, *fnn, *tdnn, *tnn;
	int error;

	tdvp = ap->a_tdvp;
	fvp = ap->a_fvp;
	fdvp = ap->a_fdvp;
	tvp = ap->a_tvp;
	lfdvp = NULL;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
		error = EXDEV;
		goto upper_err;
	}

	VI_LOCK(fdvp);
	fdnn = VTONULL(fdvp);
	if (fdnn == NULL) {	/* fdvp is not locked, can be doomed */
		VI_UNLOCK(fdvp);
		error = ENOENT;
		goto upper_err;
	}
	lfdvp = fdnn->null_lowervp;
	vref(lfdvp);
	VI_UNLOCK(fdvp);

	VI_LOCK(fvp);
	fnn = VTONULL(fvp);
	if (fnn == NULL) {
		VI_UNLOCK(fvp);
		error = ENOENT;
		goto upper_err;
	}
	lfvp = fnn->null_lowervp;
	vref(lfvp);
	VI_UNLOCK(fvp);

	tdnn = VTONULL(tdvp);
	ltdvp = tdnn->null_lowervp;
	vref(ltdvp);

	if (tvp != NULL) {
		tnn = VTONULL(tvp);
		ltvp = tnn->null_lowervp;
		vref(ltvp);
		tnn->null_flags |= NULLV_DROP;
	} else {
		ltvp = NULL;
	}

	error = VOP_RENAME(lfdvp, lfvp, ap->a_fcnp, ltdvp, ltvp, ap->a_tcnp);
	vrele(fdvp);
	vrele(fvp);
	vrele(tdvp);
	if (tvp != NULL)
		vrele(tvp);
	return (error);

upper_err:
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	if (lfdvp != NULL)
		vrele(lfdvp);
	vrele(fdvp);
	vrele(fvp);
	return (error);
}

static int
null_rmdir(struct vop_rmdir_args *ap)
{

	VTONULL(ap->a_vp)->null_flags |= NULLV_DROP;
	return (null_bypass(&ap->a_gen));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((ap->a_flags & LK_INTERLOCK) == 0)
		VI_LOCK(vp);
	else
		ap->a_flags &= ~LK_INTERLOCK;
	flags = ap->a_flags;
	nn = VTONULL(vp);
	/*
	 * If we're still active we must ask the lower layer to
	 * lock as ffs has special lock considerations in its
	 * vop lock.
	 */
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		/*
		 * We have to hold the vnode here to solve a potential
		 * reclaim race.  If we're forcibly vgone'd while we
		 * still have refs, a thread could be sleeping inside
		 * the lowervp's vop_lock routine.  When we vgone we will
		 * drop our last ref to the lowervp, which would allow it
		 * to be reclaimed.  The lowervp could then be recycled,
		 * in which case it is not legal to be sleeping in its VOP.
		 * We prevent it from being recycled by holding the vnode
		 * here.
		 */
		vholdnz(lvp);
		VI_UNLOCK(vp);
		error = VOP_LOCK(lvp, flags);

		/*
		 * We might have slept to get the lock and someone might
		 * have cleaned our vnode already, switching the vnode
		 * lock from the one in lowervp to the v_lock in our own
		 * vnode structure.  Handle this case by reacquiring the
		 * correct lock in the requested mode.
		 */
		if (VTONULL(vp) == NULL && error == 0) {
			ap->a_flags &= ~LK_TYPE_MASK;
			switch (flags & LK_TYPE_MASK) {
			case LK_SHARED:
				ap->a_flags |= LK_SHARED;
				break;
			case LK_UPGRADE:
			case LK_EXCLUSIVE:
				ap->a_flags |= LK_EXCLUSIVE;
				break;
			default:
				panic("Unsupported lock request %d\n",
				    ap->a_flags);
			}
			VOP_UNLOCK(lvp);
			error = vop_stdlock(ap);
		}
		vdrop(lvp);
	} else {
		VI_UNLOCK(vp);
		error = vop_stdlock(ap);
	}

	return (error);
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	nn = VTONULL(vp);
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		vholdnz(lvp);
		error = VOP_UNLOCK(lvp);
		vdrop(lvp);
	} else {
		error = vop_stdunlock(ap);
	}

	return (error);
}

/*
 * Do not allow the VOP_INACTIVE to be passed to the lower layer,
 * since the reference count on the lower vnode is not related to
 * ours.
 */
static int
null_want_recycle(struct vnode *vp)
{
	struct vnode *lvp;
	struct null_node *xp;
	struct mount *mp;
	struct null_mount *xmp;

	xp = VTONULL(vp);
	lvp = NULLVPTOLOWERVP(vp);
	mp = vp->v_mount;
	xmp = MOUNTTONULLMOUNT(mp);
	if ((xmp->nullm_flags & NULLM_CACHE) == 0 ||
	    (xp->null_flags & NULLV_DROP) != 0 ||
	    (lvp->v_vflag & VV_NOSYNC) != 0) {
		/*
		 * If this is the last reference and caching of the
		 * nullfs vnodes is not enabled, or the lower vnode is
		 * deleted, then free up the vnode so as not to tie up
		 * the lower vnodes.
		 */
		return (1);
	}
	return (0);
}

static int
null_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	if (null_want_recycle(vp)) {
		vp->v_object = NULL;
		vrecycle(vp);
	}
	return (0);
}

static int
null_need_inactive(struct vop_need_inactive_args *ap)
{

	return (null_want_recycle(ap->a_vp) || vn_need_pageq_flush(ap->a_vp));
}

/*
 * Now, the nullfs vnode and, due to the shared lock, the lower
 * vnode, are exclusively locked, and we shall destroy the null vnode.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp;
	struct null_node *xp;
	struct vnode *lowervp;

	vp = ap->a_vp;
	xp = VTONULL(vp);
	lowervp = xp->null_lowervp;

	KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock,
	    ("Reclaiming incomplete null vnode %p", vp));

	null_hashrem(xp);
	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in null_lock().
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_object = NULL;
	vp->v_vnlock = &vp->v_lock;

	/*
	 * If we were opened for write, we leased the write reference
	 * to the lower vnode.  If this is a reclamation due to a
	 * forced unmount, undo the reference now.
	 */
	if (vp->v_writecount > 0)
		VOP_ADD_WRITECOUNT(lowervp, -vp->v_writecount);
	else if (vp->v_writecount < 0)
		vp->v_writecount = 0;

	VI_UNLOCK(vp);

	if ((xp->null_flags & NULLV_NOUNLOCK) != 0)
		vunref(lowervp);
	else
		vput(lowervp);
	free(xp, M_NULLFSNODE);

	return (0);
}

static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp);
	return (0);
}

/* ARGSUSED */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *lowervp;
	struct vnode *vp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp && (lowervp = xp->null_lowervp)) {
		vholdnz(lowervp);
		VI_UNLOCK(vp);
		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
		vdrop(lowervp);
	} else {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
	}
	return (0);
}

static int
null_vptofh(struct vop_vptofh_args *ap)
{
	struct vnode *lvp;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	return (VOP_VPTOFH(lvp, ap->a_fhp));
}

static int
null_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct vnode *lvp, *ldvp;
	struct mount *mp;
	int error, locked;

	locked = VOP_ISLOCKED(vp);
	lvp = NULLVPTOLOWERVP(vp);
	mp = vp->v_mount;
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0)
		return (error);
	vhold(lvp);
	VOP_UNLOCK(vp); /* vp is held by vn_vptocnp_locked that called us */
	ldvp = lvp;
	vref(lvp);
	error = vn_vptocnp(&ldvp, ap->a_buf, ap->a_buflen);
	vdrop(lvp);
	if (error != 0) {
		vn_lock(vp, locked | LK_RETRY);
		vfs_unbusy(mp);
		return (ENOENT);
	}

	error = vn_lock(ldvp, LK_SHARED);
	if (error != 0) {
		vrele(ldvp);
		vn_lock(vp, locked | LK_RETRY);
		vfs_unbusy(mp);
		return (ENOENT);
	}
	error = null_nodeget(mp, ldvp, dvp);
	if (error == 0) {
#ifdef DIAGNOSTIC
		NULLVPTOLOWERVP(*dvp);
#endif
		VOP_UNLOCK(*dvp); /* keep reference on *dvp */
	}
	vn_lock(vp, locked | LK_RETRY);
	vfs_unbusy(mp);
	return (error);
}

static int
null_read_pgcache(struct vop_read_pgcache_args *ap)
{
	struct vnode *lvp, *vp;
	struct null_node *xp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp == NULL) {
		VI_UNLOCK(vp);
		return (EJUSTRETURN);
	}
	lvp = xp->null_lowervp;
	vref(lvp);
	VI_UNLOCK(vp);
	error = VOP_READ_PGCACHE(lvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	vrele(lvp);
	return (error);
}

static int
null_advlock(struct vop_advlock_args *ap)
{
	struct vnode *lvp, *vp;
	struct null_node *xp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp == NULL) {
		VI_UNLOCK(vp);
		return (EBADF);
	}
	lvp = xp->null_lowervp;
	vref(lvp);
	VI_UNLOCK(vp);
	error = VOP_ADVLOCK(lvp, ap->a_id, ap->a_op, ap->a_fl, ap->a_flags);
	vrele(lvp);
	return (error);
}

/*
 * Avoid the standard bypass, since the lower dvp and vp could no
 * longer be valid after vput().
 */
static int
null_vput_pair(struct vop_vput_pair_args *ap)
{
	struct mount *mp;
	struct vnode *dvp, *ldvp, *lvp, *vp, *vp1, **vpp;
	int error, res;

	dvp = ap->a_dvp;
	ldvp = NULLVPTOLOWERVP(dvp);
	vref(ldvp);

	vpp = ap->a_vpp;
	vp = NULL;
	lvp = NULL;
	mp = NULL;
	if (vpp != NULL)
		vp = *vpp;
	if (vp != NULL) {
		lvp = NULLVPTOLOWERVP(vp);
		vref(lvp);
		if (!ap->a_unlock_vp) {
			vhold(vp);
			vhold(lvp);
			mp = vp->v_mount;
			vfs_ref(mp);
		}
	}

	res = VOP_VPUT_PAIR(ldvp, lvp != NULL ? &lvp : NULL, true);
	if (vp != NULL && ap->a_unlock_vp)
		vrele(vp);
	vrele(dvp);

	if (vp == NULL || ap->a_unlock_vp)
		return (res);

	/* lvp has been unlocked and vp might be reclaimed */
	VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_data == NULL && vfs_busy(mp, MBF_NOWAIT) == 0) {
		vput(vp);
		vget(lvp, LK_EXCLUSIVE | LK_RETRY);
		if (VN_IS_DOOMED(lvp)) {
			vput(lvp);
			vget(vp, LK_EXCLUSIVE | LK_RETRY);
		} else {
			error = null_nodeget(mp, lvp, &vp1);
			if (error == 0) {
				*vpp = vp1;
			} else {
				vget(vp, LK_EXCLUSIVE | LK_RETRY);
			}
		}
		vfs_unbusy(mp);
	}
	vdrop(lvp);
	vdrop(vp);
	vfs_rel(mp);

	return (res);
}

/*
 * Global vfs data structures
 */
struct vop_vector null_vnodeops = {
	.vop_bypass =		null_bypass,
	.vop_access =		null_access,
	.vop_accessx =		null_accessx,
	.vop_advlock =		null_advlock,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_stat =		null_stat,
	.vop_getattr =		null_getattr,
	.vop_getwritemount =	null_getwritemount,
	.vop_inactive =		null_inactive,
	.vop_need_inactive =	null_need_inactive,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		null_lock,
	.vop_lookup =		null_lookup,
	.vop_open =		null_open,
	.vop_print =		null_print,
	.vop_read_pgcache =	null_read_pgcache,
	.vop_reclaim =		null_reclaim,
	.vop_remove =		null_remove,
	.vop_rename =		null_rename,
	.vop_rmdir =		null_rmdir,
	.vop_setattr =		null_setattr,
	.vop_strategy =		VOP_EOPNOTSUPP,
	.vop_unlock =		null_unlock,
	.vop_vptocnp =		null_vptocnp,
	.vop_vptofh =		null_vptofh,
	.vop_add_writecount =	null_add_writecount,
	.vop_vput_pair =	null_vput_pair,
};
VFS_VOP_VECTOR_REGISTER(null_vnodeops);
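
/*
 * Illustrative sketch (not compiled): a hypothetical layer cloned
 * from nullfs, as suggested in CREATING OTHER FILE SYSTEM LAYERS
 * above, would register its own renamed vector in the same way.
 * The example_* names are assumptions made up for this sketch.
 */
#if 0
struct vop_vector example_vnodeops = {
	.vop_bypass =		example_bypass,		/* renamed null_bypass */
	.vop_lock1 =		example_lock,		/* locking is never bypassed */
	.vop_unlock =		example_unlock,
	.vop_inactive =		example_inactive,	/* frees layer-private data */
	.vop_reclaim =		example_reclaim,
	.vop_print =		example_print,
};
VFS_VOP_VECTOR_REGISTER(example_vnodeops);
#endif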