/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 *
 * $FreeBSD$
 */

/*
 * Null Layer
 *
 * (See mount_nullfs(8) for more information.)
 *
 * The null layer duplicates a portion of the filesystem
 * name space under a new name.  In this respect, it is
 * similar to the loopback filesystem.  It differs from
 * the loopback fs in two respects:  it is implemented using
 * stackable-layer techniques, and its "null-nodes" stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback filesystem does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new filesystem layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_nullfs(8).
 * Mount_nullfs(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
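 * For example,
 *
 *	mount_nullfs /usr/include /mnt/include
 *
 * makes the tree rooted at /usr/include appear under /mnt/include as well.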
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum filesystem layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining the vnode
 * operation arguments and replacing any null-nodes with their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it restores the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed, to avoid excessive debugging information.
 * Also, certain vnode operations (create, mknod, remove, link,
 * rename, mkdir, rmdir, and symlink) change the locking state
 * within the operation.  Ideally these operations should not
 * change the lock state, but should instead let the caller of
 * the function unlock them.  Otherwise, all intermediate vnode
 * layers (such as union, umapfs, etc.) must catch these functions
 * to do the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  The vnode stacks themselves are
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_nullfs /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new filesystem layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  The sed(1) utility can be used to
 * easily rename all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr() in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 * (A sketch of both techniques follows this comment.)
 */
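
/*
 * Illustrative sketch only, kept out of the build with #if 0: the two
 * techniques described above, as they might appear in a hypothetical
 * layer derived from nullfs.  The example_* names are invented for
 * this illustration and are not part of nullfs.
 */
#if 0
/* Technique 1: reuse the bypass routine, as null_getattr() below does. */
static int
example_getattr(struct vop_getattr_args *ap)
{
	int error;

	/* The bypass maps a_vp to the lower vnode and calls down. */
	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);
	/* Post-process the lower layer's result. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/* Technique 2: invoke an arbitrary operation directly on the lower vnode. */
static int
example_fsync_lower(struct vnode *vp, struct thread *td)
{
	struct vnode *lvp;

	/* Argument mapping must be done by hand. */
	lvp = NULLVPTOLOWERVP(vp);
	return (VOP_FSYNC(lvp, MNT_WAIT, td));
}
#endif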

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/stat.h>

#include <fs/nullfs/null.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "Print a message on each call into null_bypass()");

/*
 * This is the 10-Apr-92 bypass routine.
 *    This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p = VOPARG_OFFSETTO(struct vnode **,
		    descp->vdesc_vp_offsets[i], ap);
		/*
		 * We are not guaranteed that any vnode but the first is
		 * of our type.  Check for it, and do not map any that
		 * are not.  (We must always map the first vp, or vclean
		 * fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != &null_vnodeops)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	if (vps_p[0] && *vps_p[0])
		error = VCALL(ap);
	else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
#if 0
			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]), 0);
#endif
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		vppp = VOPARG_OFFSETTO(struct vnode ***,
		    descp->vdesc_vpp_offset, ap);
		if (*vppp)
			error = null_nodeget(old_vps[0]->v_mount, **vppp, *vppp);
	}

	return (error);
}

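/*
 * Write-count changes are forwarded to the lower vnode, and the nullfs
 * vnode mirrors the count only when the lower layer accepts the change;
 * executable text references are carried by the lower vnode.
 */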
static int
null_add_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *lvp, *vp;
	int error;

	vp = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);
	VI_LOCK(vp);
	/* text refs are bypassed to lowervp */
	VNASSERT(vp->v_writecount >= 0, vp, ("wrong null writecount"));
	VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
	    ("wrong writecount inc %d", ap->a_inc));
	error = VOP_ADD_WRITECOUNT(lvp, ap->a_inc);
	if (error == 0)
		vp->v_writecount += ap->a_inc;
	VI_UNLOCK(vp);
	return (error);
}

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * behavior if this layer is mounted read-only.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	int flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	struct mount *mp;
	int error;

	mp = dvp->v_mount;
	if ((flags & ISLASTCN) != 0 && (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	/*
	 * Although it is possible to call null_bypass(), we make
	 * a direct call here to reduce overhead.
	 */
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;
	KASSERT((ldvp->v_vflag & VV_ROOT) == 0 ||
	    ((dvp->v_vflag & VV_ROOT) != 0 && (flags & ISDOTDOT) == 0),
	    ("ldvp %p fl %#x dvp %p fl %#x flags %#x", ldvp, ldvp->v_vflag,
	     dvp, dvp->v_vflag, flags));

	/*
	 * Hold ldvp.  The reference on it, owned by dvp, is lost in
	 * case of dvp reclamation, and we need ldvp to move our lock
	 * from ldvp to dvp.
	 */
	vhold(ldvp);

	error = VOP_LOOKUP(ldvp, &lvp, cnp);

	/*
	 * VOP_LOOKUP() on the lower vnode may unlock ldvp, which allows
	 * dvp to be reclaimed due to the shared v_vnlock.  Check for the
	 * doomed state and return an error.
	 */
	if ((error == 0 || error == EJUSTRETURN) &&
	    VN_IS_DOOMED(dvp)) {
		error = ENOENT;
		if (lvp != NULL)
			vput(lvp);

		/*
		 * If vgone() reclaimed dvp before curthread
		 * relocked ldvp, the locks of dvp and ldvp are no
		 * longer shared.  In this case, the relock of ldvp in
		 * the lower fs VOP_LOOKUP() does not restore the locking
		 * state of dvp.  Compensate for this by unlocking
		 * ldvp and locking dvp, which is also correct if the
		 * locks are still shared.
		 */
		VOP_UNLOCK(ldvp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	}
	vdrop(ldvp);

	if (error == EJUSTRETURN && (flags & ISLASTCN) != 0 &&
	    (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = null_nodeget(mp, lvp, &vp);
			if (error == 0)
				*ap->a_vpp = vp;
		}
	}
	return (error);
}

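/*
 * We handle open specially so that the nullfs vnode shares the lower
 * vnode's VM object, letting reads be served through the lower layer's
 * page cache.
 */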
static int
null_open(struct vop_open_args *ap)
{
	int retval;
	struct vnode *vp, *ldvp;

	vp = ap->a_vp;
	ldvp = NULLVPTOLOWERVP(vp);
	retval = null_bypass(&ap->a_gen);
	if (retval == 0) {
		vp->v_object = ldvp->v_object;
		if ((ldvp->v_irflag & VIRF_PGREAD) != 0) {
			MPASS(vp->v_object != NULL);
			if ((vp->v_irflag & VIRF_PGREAD) == 0) {
				VI_LOCK(vp);
				vp->v_irflag |= VIRF_PGREAD;
				VI_UNLOCK(vp);
			}
		}
	}
	return (retval);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle stat and getattr only to change the fsid.
 */
static int
null_stat(struct vop_stat_args *ap)
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_sb->st_dev = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

static int
null_getattr(struct vop_getattr_args *ap)
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * Handled here to disallow write access if the layer is mounted read-only.
 */
static int
null_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers unless the file
	 * is a socket, fifo, or a block or character device resident
	 * on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

static int
null_accessx(struct vop_accessx_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers unless the file
	 * is a socket, fifo, or a block or character device resident
	 * on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * Increasing the refcount of the lower vnode is needed at least when the
 * lower FS is NFS, so that it can do a sillyrename if the file is in use.
 * Unfortunately v_usecount is incremented in many places in
 * the kernel and, as such, there may be races that result in
 * the NFS client doing an extraneous silly rename, but that seems
 * preferable to not doing a silly rename when it is needed.
 */
static int
null_remove(struct vop_remove_args *ap)
{
	int retval, vreleit;
	struct vnode *lvp, *vp;

	vp = ap->a_vp;
	if (vrefcnt(vp) > 1) {
		lvp = NULLVPTOLOWERVP(vp);
		VREF(lvp);
		vreleit = 1;
	} else
		vreleit = 0;
	VTONULL(vp)->null_flags |= NULLV_DROP;
	retval = null_bypass(&ap->a_gen);
	if (vreleit != 0)
		vrele(lvp);
	return (retval);
}

/*
 * We handle this to prevent file moves from the null FS to the
 * lower FS.  It is not known why such moves are disallowed;
 * possibly they should be permitted.
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp = ap->a_tvp;
	struct null_node *tnn;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		vrele(fdvp);
		vrele(fvp);
		return (EXDEV);
	}

	if (tvp != NULL) {
		tnn = VTONULL(tvp);
		tnn->null_flags |= NULLV_DROP;
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

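/*
 * Mark the removed directory's nullfs vnode for recycling so that the
 * corresponding lower vnode is released promptly.
 */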
static int
null_rmdir(struct vop_rmdir_args *ap)
{

	VTONULL(ap->a_vp)->null_flags |= NULLV_DROP;
	return (null_bypass(&ap->a_gen));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((ap->a_flags & LK_INTERLOCK) == 0)
		VI_LOCK(vp);
	else
		ap->a_flags &= ~LK_INTERLOCK;
	flags = ap->a_flags;
	nn = VTONULL(vp);
	/*
	 * If we're still active we must ask the lower layer to
	 * lock as ffs has special lock considerations in its
	 * vop lock.
	 */
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		/*
		 * We have to hold the vnode here to solve a potential
		 * reclaim race.  If we're forcibly vgone'd while we
		 * still have refs, a thread could be sleeping inside
		 * the lowervp's vop_lock routine.  When we vgone we will
		 * drop our last ref to the lowervp, which would allow it
		 * to be reclaimed.  The lowervp could then be recycled,
		 * in which case it is not legal to be sleeping in its VOP.
		 * We prevent it from being recycled by holding the vnode
		 * here.
		 */
		vholdnz(lvp);
		VI_UNLOCK(vp);
		error = VOP_LOCK(lvp, flags);

		/*
		 * We might have slept to get the lock and someone might
		 * have cleaned our vnode already, switching the vnode
		 * lock from the one in lowervp to v_lock in our own
		 * vnode structure.  Handle this case by reacquiring the
		 * correct lock in the requested mode.
		 */
		if (VTONULL(vp) == NULL && error == 0) {
			ap->a_flags &= ~LK_TYPE_MASK;
			switch (flags & LK_TYPE_MASK) {
			case LK_SHARED:
				ap->a_flags |= LK_SHARED;
				break;
			case LK_UPGRADE:
			case LK_EXCLUSIVE:
				ap->a_flags |= LK_EXCLUSIVE;
				break;
			default:
				panic("Unsupported lock request %d\n",
				    ap->a_flags);
			}
			VOP_UNLOCK(lvp);
			error = vop_stdlock(ap);
		}
		vdrop(lvp);
	} else {
		VI_UNLOCK(vp);
		error = vop_stdlock(ap);
	}

	return (error);
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	nn = VTONULL(vp);
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		vholdnz(lvp);
		error = VOP_UNLOCK(lvp);
		vdrop(lvp);
	} else {
		error = vop_stdunlock(ap);
	}

	return (error);
}

/*
 * Do not allow the VOP_INACTIVE to be passed to the lower layer,
 * since the reference count on the lower vnode is not related to
 * ours.
 */
static int
null_want_recycle(struct vnode *vp)
{
	struct vnode *lvp;
	struct null_node *xp;
	struct mount *mp;
	struct null_mount *xmp;

	xp = VTONULL(vp);
	lvp = NULLVPTOLOWERVP(vp);
	mp = vp->v_mount;
	xmp = MOUNTTONULLMOUNT(mp);
	if ((xmp->nullm_flags & NULLM_CACHE) == 0 ||
	    (xp->null_flags & NULLV_DROP) != 0 ||
	    (lvp->v_vflag & VV_NOSYNC) != 0) {
		/*
		 * If this is the last reference and caching of the
		 * nullfs vnodes is not enabled, or the lower vnode is
		 * deleted, then free up the vnode so as not to tie up
		 * the lower vnodes.
		 */
		return (1);
	}
	return (0);
}

static int
null_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	if (null_want_recycle(vp)) {
		vp->v_object = NULL;
		vrecycle(vp);
	}
	return (0);
}

static int
null_need_inactive(struct vop_need_inactive_args *ap)
{

	return (null_want_recycle(ap->a_vp));
}

/*
 * Now the nullfs vnode and, due to the shared lock, the lower
 * vnode, are exclusively locked, and we shall destroy the null vnode.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp;
	struct null_node *xp;
	struct vnode *lowervp;

	vp = ap->a_vp;
	xp = VTONULL(vp);
	lowervp = xp->null_lowervp;

	KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock,
	    ("Reclaiming incomplete null vnode %p", vp));

	null_hashrem(xp);
	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in null_lock().
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_object = NULL;
	vp->v_vnlock = &vp->v_lock;

	/*
	 * If we were opened for write, we leased the write reference
	 * to the lower vnode.  If this is a reclamation due to the
	 * forced unmount, undo the reference now.
	 */
	if (vp->v_writecount > 0)
		VOP_ADD_WRITECOUNT(lowervp, -vp->v_writecount);
	else if (vp->v_writecount < 0)
		vp->v_writecount = 0;

	VI_UNLOCK(vp);

	if ((xp->null_flags & NULLV_NOUNLOCK) != 0)
		vunref(lowervp);
	else
		vput(lowervp);
	free(xp, M_NULLFSNODE);

	return (0);
}

static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp);
	return (0);
}

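/*
 * Report the lower layer's mount as the place where writes will actually
 * go, so that write suspension is accounted against the lower filesystem.
 */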
/* ARGSUSED */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *lowervp;
	struct vnode *vp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp && (lowervp = xp->null_lowervp)) {
		vholdnz(lowervp);
		VI_UNLOCK(vp);
		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
		vdrop(lowervp);
	} else {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
	}
	return (0);
}

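/*
 * Delegate file handle generation to the lower vnode.
 */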
static int
null_vptofh(struct vop_vptofh_args *ap)
{
	struct vnode *lvp;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	return (VOP_VPTOFH(lvp, ap->a_fhp));
}

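/*
 * Resolve the name and parent directory of the given vnode by asking
 * the lower layer, then wrap the returned lower directory vnode in a
 * nullfs vnode.
 */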
static int
null_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct vnode *lvp, *ldvp;
	struct ucred *cred = ap->a_cred;
	struct mount *mp;
	int error, locked;

	locked = VOP_ISLOCKED(vp);
	lvp = NULLVPTOLOWERVP(vp);
	vhold(lvp);
	mp = vp->v_mount;
	vfs_ref(mp);
	VOP_UNLOCK(vp); /* vp is held by vn_vptocnp_locked that called us */
	ldvp = lvp;
	vref(lvp);
	error = vn_vptocnp(&ldvp, cred, ap->a_buf, ap->a_buflen);
	vdrop(lvp);
	if (error != 0) {
		vn_lock(vp, locked | LK_RETRY);
		vfs_rel(mp);
		return (ENOENT);
	}

	error = vn_lock(ldvp, LK_SHARED);
	if (error != 0) {
		vrele(ldvp);
		vn_lock(vp, locked | LK_RETRY);
		vfs_rel(mp);
		return (ENOENT);
	}
	error = null_nodeget(mp, ldvp, dvp);
	if (error == 0) {
#ifdef DIAGNOSTIC
		NULLVPTOLOWERVP(*dvp);
#endif
		VOP_UNLOCK(*dvp); /* keep reference on *dvp */
	}
	vn_lock(vp, locked | LK_RETRY);
	vfs_rel(mp);
	return (error);
}

/*
 * Global vfs data structures
 */
struct vop_vector null_vnodeops = {
	.vop_bypass =		null_bypass,
	.vop_access =		null_access,
	.vop_accessx =		null_accessx,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_stat =		null_stat,
	.vop_getattr =		null_getattr,
	.vop_getwritemount =	null_getwritemount,
	.vop_inactive =		null_inactive,
	.vop_need_inactive =	null_need_inactive,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		null_lock,
	.vop_lookup =		null_lookup,
	.vop_open =		null_open,
	.vop_print =		null_print,
	.vop_reclaim =		null_reclaim,
	.vop_remove =		null_remove,
	.vop_rename =		null_rename,
	.vop_rmdir =		null_rmdir,
	.vop_setattr =		null_setattr,
	.vop_strategy =		VOP_EOPNOTSUPP,
	.vop_unlock =		null_unlock,
	.vop_vptocnp =		null_vptocnp,
	.vop_vptofh =		null_vptofh,
	.vop_add_writecount =	null_add_writecount,
};
VFS_VOP_VECTOR_REGISTER(null_vnodeops);