/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Ancestors:
 *	...and...
 */

/*
 * Null Layer
 *
 * (See mount_nullfs(8) for more information.)
 *
 * The null layer duplicates a portion of the filesystem
 * name space under a new name.  In this respect, it is
 * similar to the loopback filesystem.  It differs from
 * the loopback fs in two respects: it is implemented using
 * a stackable-layers technique, and its "null-node"s stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback filesystem does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new filesystem layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_nullfs(8).
 * Mount_nullfs(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
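 *
 * For example (the alias path here is only an illustration):
 *
 *	mount_nullfs /usr/include /mnt/include
 *
 * makes the tree under /usr/include also visible at /mnt/include.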
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum filesystem layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
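 *
 * In rough outline (a sketch of the control flow, not literal code):
 *
 *	map null-node arguments to their lower vnodes
 *	error = VCALL(ap)	(run the lower layer's implementation)
 *	restore the original (upper) arguments
 *	if the operation returned a vnode, wrap it via null_nodeget()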
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_nullfs /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new filesystem layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
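 *
 * For example, a layer op could fetch the lower vnode's attributes
 * directly (a sketch only; vp, vap, and cred are assumed to be in
 * scope):
 *
 *	error = VOP_GETATTR(NULLVPTOLOWERVP(vp), vap, cred);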
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/stat.h>

#include <fs/nullfs/null.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static int null_bug_bypass = 0;	/* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
    &null_bug_bypass, 0, "");

/*
 * Synchronize inotify flags with the lower vnode:
 * - If the upper vnode has the flag set and the lower does not, then the lower
 *   vnode is unwatched and the upper vnode does not need to go through
 *   VOP_INOTIFY.
 * - If the lower vnode is watched, then the upper vnode should go through
 *   VOP_INOTIFY, so copy the flag up.
 */
static void
null_copy_inotify(struct vnode *vp, struct vnode *lvp, short flag)
{
	if ((vn_irflag_read(vp) & flag) != 0) {
		if (__predict_false((vn_irflag_read(lvp) & flag) == 0))
			vn_irflag_unset(vp, flag);
	} else if ((vn_irflag_read(lvp) & flag) != 0) {
		if (__predict_false((vn_irflag_read(vp) & flag) == 0))
			vn_irflag_set(vp, flag);
	}
}

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnode *lvp;
	struct vnodeop_desc *descp = ap->a_desc;
	int error, i, reles;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		vps_p[i] = this_vp_p = VOPARG_OFFSETTO(struct vnode **,
		    descp->vdesc_vp_offsets[i], ap);

		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i != 0 && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != &null_vnodeops)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);

			/*
			 * The upper vnode reference to the lower
			 * vnode is the only reference that keeps our
			 * pointer to the lower vnode alive.  If lower
			 * vnode is relocked during the VOP call,
			 * upper vnode might become unlocked and
			 * reclaimed, which invalidates our reference.
			 * Add a transient hold around VOP call.
			 */
			vhold(*this_vp_p);

			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				vref(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	if (vps_p[0] != NULL && *vps_p[0] != NULL) {
		error = VCALL(ap);
	} else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		if (old_vps[i] != NULL) {
			lvp = *(vps_p[i]);

			/*
			 * Get rid of the transient hold on lvp.  Copy inotify
			 * flags up in case something is watching the lower
			 * layer.
			 *
			 * If lowervp was unlocked during VOP
			 * operation, nullfs upper vnode could have
			 * been reclaimed, which changes its v_vnlock
			 * back to private v_lock.  In this case we
			 * must move lock ownership from lower to
			 * upper (reclaimed) vnode.
			 */
			if (lvp != NULLVP) {
				null_copy_inotify(old_vps[i], lvp,
				    VIRF_INOTIFY);
				null_copy_inotify(old_vps[i], lvp,
				    VIRF_INOTIFY_PARENT);
				if (VOP_ISLOCKED(lvp) == LK_EXCLUSIVE &&
				    old_vps[i]->v_vnlock != lvp->v_vnlock) {
					VOP_UNLOCK(lvp);
					VOP_LOCK(old_vps[i], LK_EXCLUSIVE |
					    LK_RETRY);
				}
				vdrop(lvp);
			}

			*(vps_p[i]) = old_vps[i];
#if 0
			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]), 0);
#endif
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && error == 0) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		vppp = VOPARG_OFFSETTO(struct vnode ***,
		    descp->vdesc_vpp_offset, ap);
		if (*vppp != NULL)
			error = null_nodeget(old_vps[0]->v_mount, **vppp,
			    *vppp);
	}

	return (error);
}

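/*
 * Forward writecount changes to the lower vnode; the count on the
 * upper vnode is adjusted only if the lower layer accepted the
 * change, keeping the two in sync.
 */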
static int
null_add_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *lvp, *vp;
	int error;

	vp = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);
	VI_LOCK(vp);
	/* text refs are bypassed to lowervp */
	VNASSERT(vp->v_writecount >= 0, vp, ("wrong null writecount"));
	VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
	    ("wrong writecount inc %d", ap->a_inc));
	error = VOP_ADD_WRITECOUNT(lvp, ap->a_inc);
	if (error == 0)
		vp->v_writecount += ap->a_inc;
	VI_UNLOCK(vp);
	return (error);
}

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	uint64_t flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	struct mount *mp;
	int error;

	mp = dvp->v_mount;
	if ((flags & ISLASTCN) != 0 && (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	/*
	 * Although it is possible to call null_bypass(), we'll do
	 * a direct call to reduce overhead
	 */
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;

	/*
	 * Renames in the lower mounts might create an inconsistent
	 * configuration where the lower vnode is moved out of the
	 * directory tree remounted by our null mount.
	 *
	 * Do not try to handle it fancy, just avoid VOP_LOOKUP() with a
	 * DOTDOT name which cannot be handled by the VOP.
	 */
	if ((flags & ISDOTDOT) != 0) {
		struct nameidata *ndp;

		if ((ldvp->v_vflag & VV_ROOT) != 0) {
			KASSERT((dvp->v_vflag & VV_ROOT) == 0,
			    ("ldvp %p fl %#x dvp %p fl %#x flags %#jx",
			    ldvp, ldvp->v_vflag, dvp, dvp->v_vflag,
			    (uintmax_t)flags));
			return (ENOENT);
		}
		ndp = vfs_lookup_nameidata(cnp);
		if (ndp != NULL && vfs_lookup_isroot(ndp, ldvp))
			return (ENOENT);
	}

	/*
	 * Hold ldvp.  The reference on it, owned by dvp, is lost in
	 * case of dvp reclamation, and we need ldvp to move our lock
	 * from ldvp to dvp.
	 */
	vhold(ldvp);

	error = VOP_LOOKUP(ldvp, &lvp, cnp);

	/*
	 * VOP_LOOKUP() on lower vnode may unlock ldvp, which allows
	 * dvp to be reclaimed due to shared v_vnlock.  Check for the
	 * doomed state and return error.
	 */
	if (VN_IS_DOOMED(dvp)) {
		if (error == 0 || error == EJUSTRETURN) {
			if (lvp != NULL)
				vput(lvp);
			error = ENOENT;
		}

		/*
		 * If vgone() reclaimed dvp before curthread
		 * relocked ldvp, the locks of dvp and ldvp are no
		 * longer shared.  In this case, relock of ldvp in
		 * lower fs VOP_LOOKUP() does not restore the locking
		 * state of dvp.  Compensate for this by unlocking
		 * ldvp and locking dvp, which is also correct if the
		 * locks are still shared.
		 */
		VOP_UNLOCK(ldvp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	}
	vdrop(ldvp);

	if (error == EJUSTRETURN && (flags & ISLASTCN) != 0 &&
	    (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = null_nodeget(mp, lvp, &vp);
			if (error == 0)
				*ap->a_vpp = vp;
		}
	}
	return (error);
}

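/*
 * Bypass the open to the lower layer, then share the lower vnode's
 * VM object and, if set, its VIRF_PGREAD flag, so that page-cache
 * reads and mappings on the upper vnode use the lower vnode's pages.
 */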
static int
null_open(struct vop_open_args *ap)
{
	int retval;
	struct vnode *vp, *ldvp;

	vp = ap->a_vp;
	ldvp = NULLVPTOLOWERVP(vp);
	retval = null_bypass(&ap->a_gen);
	if (retval == 0) {
		vp->v_object = ldvp->v_object;
		if ((vn_irflag_read(ldvp) & VIRF_PGREAD) != 0) {
			MPASS(vp->v_object != NULL);
			if ((vn_irflag_read(vp) & VIRF_PGREAD) == 0) {
				vn_irflag_set_cond(vp, VIRF_PGREAD);
			}
		}
	}
	return (retval);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	return (null_bypass(&ap->a_gen));
}

/*
 * We handle stat and getattr only to change the fsid.
 */
static int
null_stat(struct vop_stat_args *ap)
{
	int error;

	if ((error = null_bypass(&ap->a_gen)) != 0)
		return (error);

	ap->a_sb->st_dev = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

static int
null_getattr(struct vop_getattr_args *ap)
{
	int error;

	if ((error = null_bypass(&ap->a_gen)) != 0)
		return (error);

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * Handled to disallow write access if mounted read-only.
 */
static int
null_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers, unless the
	 * file is a socket, fifo, or a block or character device
	 * resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass(&ap->a_gen));
}

static int
null_accessx(struct vop_accessx_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers, unless the
	 * file is a socket, fifo, or a block or character device
	 * resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass(&ap->a_gen));
}

/*
 * Increasing the refcount of the lower vnode is needed at least for the
 * case when the lower FS is NFS, to do a sillyrename if the file is in
 * use.  Unfortunately v_usecount is incremented in many places in
 * the kernel and, as such, there may be races that result in
 * the NFS client doing an extraneous silly rename, but that seems
 * preferable to not doing a silly rename when it is needed.
 */
static int
null_remove(struct vop_remove_args *ap)
{
	int retval, vreleit;
	struct vnode *lvp, *vp;

	vp = ap->a_vp;
	if (vrefcnt(vp) > 1) {
		lvp = NULLVPTOLOWERVP(vp);
		VREF(lvp);
		vreleit = 1;
	} else
		vreleit = 0;
	VTONULL(vp)->null_flags |= NULLV_DROP;
	retval = null_bypass(&ap->a_gen);
	if (vreleit != 0)
		vrele(lvp);
	return (retval);
}

/*
 * We handle this to prevent moving files from the null FS into the
 * lower FS.  It is not clear why this is disallowed; possibly it
 * should be permitted.
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *fdvp, *fvp, *tdvp, *tvp;
	struct vnode *lfdvp, *lfvp, *ltdvp, *ltvp;
	struct null_node *fdnn, *fnn, *tdnn, *tnn;
	int error;

	tdvp = ap->a_tdvp;
	fvp = ap->a_fvp;
	fdvp = ap->a_fdvp;
	tvp = ap->a_tvp;
	lfdvp = NULL;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
		error = EXDEV;
		goto upper_err;
	}

	VI_LOCK(fdvp);
	fdnn = VTONULL(fdvp);
	if (fdnn == NULL) {	/* fdvp is not locked, can be doomed */
		VI_UNLOCK(fdvp);
		error = ENOENT;
		goto upper_err;
	}
	lfdvp = fdnn->null_lowervp;
	vref(lfdvp);
	VI_UNLOCK(fdvp);

	VI_LOCK(fvp);
	fnn = VTONULL(fvp);
	if (fnn == NULL) {
		VI_UNLOCK(fvp);
		error = ENOENT;
		goto upper_err;
	}
	lfvp = fnn->null_lowervp;
	vref(lfvp);
	VI_UNLOCK(fvp);

	tdnn = VTONULL(tdvp);
	ltdvp = tdnn->null_lowervp;
	vref(ltdvp);

	if (tvp != NULL) {
		tnn = VTONULL(tvp);
		ltvp = tnn->null_lowervp;
		vref(ltvp);
		tnn->null_flags |= NULLV_DROP;
	} else {
		ltvp = NULL;
	}

	error = VOP_RENAME(lfdvp, lfvp, ap->a_fcnp, ltdvp, ltvp, ap->a_tcnp);
	vrele(fdvp);
	vrele(fvp);
	vrele(tdvp);
	if (tvp != NULL)
		vrele(tvp);
	return (error);

upper_err:
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	if (lfdvp != NULL)
		vrele(lfdvp);
	vrele(fdvp);
	vrele(fvp);
	return (error);
}

static int
null_rmdir(struct vop_rmdir_args *ap)
{

	VTONULL(ap->a_vp)->null_flags |= NULLV_DROP;
	return (null_bypass(&ap->a_gen));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((ap->a_flags & LK_INTERLOCK) == 0)
		VI_LOCK(vp);
	else
		ap->a_flags &= ~LK_INTERLOCK;
	flags = ap->a_flags;
	nn = VTONULL(vp);
	/*
	 * If we're still active we must ask the lower layer to
	 * lock as ffs has special lock considerations in its
	 * vop lock.
	 */
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		/*
		 * We have to hold the vnode here to solve a potential
		 * reclaim race.  If we're forcibly vgone'd while we
		 * still have refs, a thread could be sleeping inside
		 * the lowervp's vop_lock routine.  When we vgone we will
		 * drop our last ref to the lowervp, which would allow it
		 * to be reclaimed.  The lowervp could then be recycled,
		 * in which case it is not legal to be sleeping in its VOP.
		 * We prevent it from being recycled by holding the vnode
		 * here.
		 */
		vholdnz(lvp);
		VI_UNLOCK(vp);
		error = VOP_LOCK(lvp, flags);

		/*
		 * We might have slept to get the lock and someone might
		 * have cleaned our vnode already, switching the vnode
		 * lock from the one in lowervp to v_lock in our own
		 * vnode structure.  Handle this case by reacquiring the
		 * correct lock in the requested mode.
		 */
		if (VTONULL(vp) == NULL && error == 0) {
			ap->a_flags &= ~LK_TYPE_MASK;
			switch (flags & LK_TYPE_MASK) {
			case LK_SHARED:
				ap->a_flags |= LK_SHARED;
				break;
			case LK_UPGRADE:
			case LK_EXCLUSIVE:
				ap->a_flags |= LK_EXCLUSIVE;
				break;
			default:
				panic("Unsupported lock request %d\n",
				    ap->a_flags);
			}
			VOP_UNLOCK(lvp);
			error = vop_stdlock(ap);
		}
		vdrop(lvp);
	} else {
		VI_UNLOCK(vp);
		error = vop_stdlock(ap);
	}

	return (error);
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	nn = VTONULL(vp);
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		vholdnz(lvp);
		error = VOP_UNLOCK(lvp);
		vdrop(lvp);
	} else {
		error = vop_stdunlock(ap);
	}

	return (error);
}

/*
 * Do not allow the VOP_INACTIVE to be passed to the lower layer,
 * since the reference count on the lower vnode is not related to
 * ours.
 */
static int
null_want_recycle(struct vnode *vp)
{
	struct vnode *lvp;
	struct null_node *xp;
	struct mount *mp;
	struct null_mount *xmp;

	xp = VTONULL(vp);
	lvp = NULLVPTOLOWERVP(vp);
	mp = vp->v_mount;
	xmp = MOUNTTONULLMOUNT(mp);
	if ((xmp->nullm_flags & NULLM_CACHE) == 0 ||
	    (xp->null_flags & NULLV_DROP) != 0 ||
	    (lvp->v_vflag & VV_NOSYNC) != 0) {
		/*
		 * If this is the last reference and caching of the
		 * nullfs vnodes is not enabled, or the lower vnode is
		 * deleted, then free up the vnode so as not to tie up
		 * the lower vnodes.
		 */
		return (1);
	}
	return (0);
}

static int
null_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	if (null_want_recycle(vp)) {
		vp->v_object = NULL;
		vrecycle(vp);
	}
	return (0);
}

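/*
 * Inactivation is needed when we would recycle the vnode or when
 * dirty pages must be flushed.
 */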
static int
null_need_inactive(struct vop_need_inactive_args *ap)
{

	return (null_want_recycle(ap->a_vp) || vn_need_pageq_flush(ap->a_vp));
}

/*
 * Now, the nullfs vnode and, due to the sharing lock, the lower
 * vnode, are exclusively locked, and we shall destroy the null vnode.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp;
	struct null_node *xp;
	struct vnode *lowervp;

	vp = ap->a_vp;
	xp = VTONULL(vp);
	lowervp = xp->null_lowervp;

	KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock,
	    ("Reclaiming incomplete null vnode %p", vp));

	null_hashrem(xp);
	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in null_lock().
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_object = NULL;
	vp->v_vnlock = &vp->v_lock;

	/*
	 * If we were opened for write, we leased the write reference
	 * to the lower vnode.  If this is a reclamation due to the
	 * forced unmount, undo the reference now.
	 */
	if (vp->v_writecount > 0)
		VOP_ADD_WRITECOUNT(lowervp, -vp->v_writecount);
	else if (vp->v_writecount < 0)
		vp->v_writecount = 0;

	VI_UNLOCK(vp);

	if ((xp->null_flags & NULLV_NOUNLOCK) != 0)
		vunref(lowervp);
	else
		vput(lowervp);
	free(xp, M_NULLFSNODE);

	return (0);
}

static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp);
	return (0);
}

/* ARGSUSED */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *lowervp;
	struct vnode *vp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp && (lowervp = xp->null_lowervp)) {
		vholdnz(lowervp);
		VI_UNLOCK(vp);
		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
		vdrop(lowervp);
	} else {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
	}
	return (0);
}

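/*
 * File handles are produced by the lower filesystem for the lower
 * vnode.
 */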
static int
null_vptofh(struct vop_vptofh_args *ap)
{
	struct vnode *lvp;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	return (VOP_VPTOFH(lvp, ap->a_fhp));
}

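/*
 * Resolve the vnode's component name through the lower layer, then
 * wrap the returned lower directory vnode in a nullfs vnode.
 */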
static int
null_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct vnode *lvp, *ldvp;
	struct mount *mp;
	int error, locked;

	locked = VOP_ISLOCKED(vp);
	lvp = NULLVPTOLOWERVP(vp);
	mp = vp->v_mount;
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0)
		return (error);
	vhold(lvp);
	VOP_UNLOCK(vp);	/* vp is held by vn_vptocnp_locked that called us */
	ldvp = lvp;
	vref(lvp);
	error = vn_vptocnp(&ldvp, ap->a_buf, ap->a_buflen);
	vdrop(lvp);
	if (error != 0) {
		vn_lock(vp, locked | LK_RETRY);
		vfs_unbusy(mp);
		return (ENOENT);
	}

	error = vn_lock(ldvp, LK_SHARED);
	if (error != 0) {
		vrele(ldvp);
		vn_lock(vp, locked | LK_RETRY);
		vfs_unbusy(mp);
		return (ENOENT);
	}
	error = null_nodeget(mp, ldvp, dvp);
	if (error == 0) {
#ifdef DIAGNOSTIC
		NULLVPTOLOWERVP(*dvp);
#endif
		VOP_UNLOCK(*dvp);	/* keep reference on *dvp */
	}
	vn_lock(vp, locked | LK_RETRY);
	vfs_unbusy(mp);
	return (error);
}

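/*
 * Satisfy reads from the page cache through the lower vnode.
 * Returning EJUSTRETURN for a reclaimed upper vnode lets the caller
 * fall back to the regular locked VOP_READ() path.
 */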
static int
null_read_pgcache(struct vop_read_pgcache_args *ap)
{
	struct vnode *lvp, *vp;
	struct null_node *xp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp == NULL) {
		VI_UNLOCK(vp);
		return (EJUSTRETURN);
	}
	lvp = xp->null_lowervp;
	vref(lvp);
	VI_UNLOCK(vp);
	error = VOP_READ_PGCACHE(lvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	vrele(lvp);
	return (error);
}

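/*
 * Advisory locking is delegated to the lower vnode, which holds the
 * actual file state; a reference keeps it from being recycled during
 * the call.
 */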
static int
null_advlock(struct vop_advlock_args *ap)
{
	struct vnode *lvp, *vp;
	struct null_node *xp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp == NULL) {
		VI_UNLOCK(vp);
		return (EBADF);
	}
	lvp = xp->null_lowervp;
	vref(lvp);
	VI_UNLOCK(vp);
	error = VOP_ADVLOCK(lvp, ap->a_id, ap->a_op, ap->a_fl, ap->a_flags);
	vrele(lvp);
	return (error);
}

/*
 * Avoid the standard bypass, since the lower dvp and vp could no
 * longer be valid after vput().
 */
static int
null_vput_pair(struct vop_vput_pair_args *ap)
{
	struct mount *mp;
	struct vnode *dvp, *ldvp, *lvp, *vp, *vp1, **vpp;
	int error, res;

	dvp = ap->a_dvp;
	ldvp = NULLVPTOLOWERVP(dvp);
	vref(ldvp);

	vpp = ap->a_vpp;
	vp = NULL;
	lvp = NULL;
	mp = NULL;
	if (vpp != NULL)
		vp = *vpp;
	if (vp != NULL) {
		lvp = NULLVPTOLOWERVP(vp);
		vref(lvp);
		if (!ap->a_unlock_vp) {
			vhold(vp);
			vhold(lvp);
			mp = vp->v_mount;
			vfs_ref(mp);
		}
	}

	res = VOP_VPUT_PAIR(ldvp, lvp != NULL ? &lvp : NULL, true);
	if (vp != NULL && ap->a_unlock_vp)
		vrele(vp);
	vrele(dvp);

	if (vp == NULL || ap->a_unlock_vp)
		return (res);

	/* lvp has been unlocked and vp might be reclaimed */
	VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_data == NULL && vfs_busy(mp, MBF_NOWAIT) == 0) {
		vput(vp);
		vget(lvp, LK_EXCLUSIVE | LK_RETRY);
		if (VN_IS_DOOMED(lvp)) {
			vput(lvp);
			vget(vp, LK_EXCLUSIVE | LK_RETRY);
		} else {
			error = null_nodeget(mp, lvp, &vp1);
			if (error == 0) {
				*vpp = vp1;
			} else {
				vget(vp, LK_EXCLUSIVE | LK_RETRY);
			}
		}
		vfs_unbusy(mp);
	}
	vdrop(lvp);
	vdrop(vp);
	vfs_rel(mp);

	return (res);
}

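/*
 * Hand the lower vnode back to callers that need to see through the
 * null layer.
 */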
static int
null_getlowvnode(struct vop_getlowvnode_args *ap)
{
	struct vnode *vp, *vpl;

	vp = ap->a_vp;
	if (vn_lock(vp, LK_SHARED) != 0)
		return (EBADF);

	vpl = NULLVPTOLOWERVP(vp);
	vhold(vpl);
	VOP_UNLOCK(vp);
	VOP_GETLOWVNODE(vpl, ap->a_vplp, ap->a_flags);
	vdrop(vpl);
	return (0);
}

/*
 * Global vfs data structures
 */
struct vop_vector null_vnodeops = {
	.vop_bypass = null_bypass,
	.vop_access = null_access,
	.vop_accessx = null_accessx,
	.vop_advlock = null_advlock,
	.vop_advlockpurge = vop_stdadvlockpurge,
	.vop_bmap = VOP_EOPNOTSUPP,
	.vop_stat = null_stat,
	.vop_getattr = null_getattr,
	.vop_getlowvnode = null_getlowvnode,
	.vop_getwritemount = null_getwritemount,
	.vop_inactive = null_inactive,
	.vop_need_inactive = null_need_inactive,
	.vop_islocked = vop_stdislocked,
	.vop_lock1 = null_lock,
	.vop_lookup = null_lookup,
	.vop_open = null_open,
	.vop_print = null_print,
	.vop_read_pgcache = null_read_pgcache,
	.vop_reclaim = null_reclaim,
	.vop_remove = null_remove,
	.vop_rename = null_rename,
	.vop_rmdir = null_rmdir,
	.vop_setattr = null_setattr,
	.vop_strategy = VOP_EOPNOTSUPP,
	.vop_unlock = null_unlock,
	.vop_vptocnp = null_vptocnp,
	.vop_vptofh = null_vptofh,
	.vop_add_writecount = null_add_writecount,
	.vop_vput_pair = null_vput_pair,
	.vop_copy_file_range = VOP_PANIC,
};
VFS_VOP_VECTOR_REGISTER(null_vnodeops);