/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Ancestors:
 *	...and...
 */

/*
 * Null Layer
 *
 * (See mount_nullfs(8) for more information.)
 *
 * The null layer duplicates a portion of the filesystem
 * name space under a new name.  In this respect, it is
 * similar to the loopback filesystem.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable layer techniques, and its "null-node"s stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback filesystem does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new filesystem layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_nullfs(8).
 * Mount_nullfs(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimal filesystem layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
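 *
 * As a rough sketch of the idea (purely illustrative, not the exact
 * code below), bypassing an operation amounts to re-issuing it against
 * the vnode one layer down, e.g.:
 *
 *	error = VOP_ACCESS(NULLVPTOLOWERVP(vp), accmode, cred, td);
 *
 * The bypass routine does this generically for (almost) any operation,
 * as described next.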
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it restores the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks, however, are
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_nullfs /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new filesystem layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
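 *
 * (As an illustrative sketch of such mapping, with vop_fsync used
 * purely as an example and not taken from this file, a layer might do:
 *
 *	lvp = NULLVPTOLOWERVP(vp);
 *	error = VOP_FSYNC(lvp, waitfor, td);
 *
 * Both techniques described next boil down to this mapping; they
 * differ in which code performs it.)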
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/stat.h>

#include <fs/nullfs/null.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static int null_bug_bypass = 0;	/* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnode *lvp;
	struct vnodeop_desc *descp = ap->a_desc;
	int error, i, reles;

	if (null_bug_bypass)
		printf("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		vps_p[i] = this_vp_p = VOPARG_OFFSETTO(struct vnode **,
		    descp->vdesc_vp_offsets[i], ap);

		/*
		 * We're not guaranteed that any vnodes but the first
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map the first vp or
		 * vclean fails.)
		 */
		if (i != 0 && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != &null_vnodeops)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);

			/*
			 * The upper vnode reference to the lower
			 * vnode is the only reference that keeps our
			 * pointer to the lower vnode alive.  If the
			 * lower vnode is relocked during the VOP call,
			 * the upper vnode might become unlocked and
			 * reclaimed, which invalidates our reference.
			 * Add a transient hold around the VOP call.
			 */
			vhold(*this_vp_p);

			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				vref(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	if (vps_p[0] != NULL && *vps_p[0] != NULL) {
		error = VCALL(ap);
	} else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		if (old_vps[i] != NULL) {
			lvp = *(vps_p[i]);

			/*
			 * Get rid of the transient hold on lvp.
			 * If lowervp was unlocked during the VOP
			 * operation, the nullfs upper vnode could have
			 * been reclaimed, which changes its v_vnlock
			 * back to the private v_lock.  In this case we
			 * must move lock ownership from the lower to
			 * the upper (reclaimed) vnode.
			 */
			if (lvp != NULLVP) {
				if (VOP_ISLOCKED(lvp) == LK_EXCLUSIVE &&
				    old_vps[i]->v_vnlock != lvp->v_vnlock) {
					VOP_UNLOCK(lvp);
					VOP_LOCK(old_vps[i], LK_EXCLUSIVE |
					    LK_RETRY);
				}
				vdrop(lvp);
			}

			*(vps_p[i]) = old_vps[i];
#if 0
			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]), 0);
#endif
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && error == 0) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		vppp = VOPARG_OFFSETTO(struct vnode ***,
		    descp->vdesc_vpp_offset, ap);
		if (*vppp != NULL)
			error = null_nodeget(old_vps[0]->v_mount, **vppp,
			    *vppp);
	}

	return (error);
}

static int
null_add_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *lvp, *vp;
	int error;

	vp = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);
	VI_LOCK(vp);
	/* text refs are bypassed to lowervp */
	VNASSERT(vp->v_writecount >= 0, vp, ("wrong null writecount"));
	VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
	    ("wrong writecount inc %d", ap->a_inc));
	error = VOP_ADD_WRITECOUNT(lvp, ap->a_inc);
	if (error == 0)
		vp->v_writecount += ap->a_inc;
	VI_UNLOCK(vp);
	return (error);
}

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	uint64_t flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	struct mount *mp;
	int error;

	mp = dvp->v_mount;
	if ((flags & ISLASTCN) != 0 && (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	/*
	 * Although it is possible to call null_bypass(), we make
	 * a direct call here to reduce overhead.
	 */
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;

	/*
	 * Renames in the lower mounts might create an inconsistent
	 * configuration where the lower vnode is moved out of the
	 * directory tree remounted by our null mount.
	 *
	 * Do not try to handle it fancily, just avoid VOP_LOOKUP() with
	 * a DOTDOT name, which cannot be handled by the VOP.
	 */
	if ((flags & ISDOTDOT) != 0) {
		struct nameidata *ndp;

		if ((ldvp->v_vflag & VV_ROOT) != 0) {
			KASSERT((dvp->v_vflag & VV_ROOT) == 0,
			    ("ldvp %p fl %#x dvp %p fl %#x flags %#jx",
			    ldvp, ldvp->v_vflag, dvp, dvp->v_vflag,
			    (uintmax_t)flags));
			return (ENOENT);
		}
		ndp = vfs_lookup_nameidata(cnp);
		if (ndp != NULL && vfs_lookup_isroot(ndp, ldvp))
			return (ENOENT);
	}

	/*
	 * Hold ldvp.  The reference on it, owned by dvp, is lost in
	 * case of dvp reclamation, and we need ldvp to move our lock
	 * from ldvp to dvp.
	 */
	vhold(ldvp);

	error = VOP_LOOKUP(ldvp, &lvp, cnp);

	/*
	 * VOP_LOOKUP() on the lower vnode may unlock ldvp, which allows
	 * dvp to be reclaimed due to the shared v_vnlock.  Check for the
	 * doomed state and return an error.
	 */
	if (VN_IS_DOOMED(dvp)) {
		if (error == 0 || error == EJUSTRETURN) {
			if (lvp != NULL)
				vput(lvp);
			error = ENOENT;
		}

		/*
		 * If vgone() reclaimed dvp before curthread
		 * relocked ldvp, the locks of dvp and ldvp are no
		 * longer shared.  In this case, the relock of ldvp in
		 * the lower fs VOP_LOOKUP() does not restore the locking
		 * state of dvp.  Compensate for this by unlocking
		 * ldvp and locking dvp, which is also correct if the
		 * locks are still shared.
		 */
		VOP_UNLOCK(ldvp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	}
	vdrop(ldvp);

	if (error == EJUSTRETURN && (flags & ISLASTCN) != 0 &&
	    (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = null_nodeget(mp, lvp, &vp);
			if (error == 0)
				*ap->a_vpp = vp;
		}
	}
	return (error);
}

static int
null_open(struct vop_open_args *ap)
{
	int retval;
	struct vnode *vp, *ldvp;

	vp = ap->a_vp;
	ldvp = NULLVPTOLOWERVP(vp);
	retval = null_bypass(&ap->a_gen);
	if (retval == 0) {
		/*
		 * Share the lower vnode's VM object with the upper
		 * vnode, and propagate the lower vnode's ability to
		 * satisfy reads from the page cache.
		 */
		vp->v_object = ldvp->v_object;
		if ((vn_irflag_read(ldvp) & VIRF_PGREAD) != 0) {
			MPASS(vp->v_object != NULL);
			if ((vn_irflag_read(vp) & VIRF_PGREAD) == 0) {
				vn_irflag_set_cond(vp, VIRF_PGREAD);
			}
		}
	}
	return (retval);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	return (null_bypass(&ap->a_gen));
}

/*
 * We handle stat and getattr only to change the fsid.
 */
static int
null_stat(struct vop_stat_args *ap)
{
	int error;

	if ((error = null_bypass(&ap->a_gen)) != 0)
		return (error);

	ap->a_sb->st_dev = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

static int
null_getattr(struct vop_getattr_args *ap)
{
	int error;

	if ((error = null_bypass(&ap->a_gen)) != 0)
		return (error);

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * Handled here to disallow write access if the layer is mounted read-only.
 */
static int
null_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass(&ap->a_gen));
}

static int
null_accessx(struct vop_accessx_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass(&ap->a_gen));
}

/*
 * Increasing the refcount of the lower vnode is needed at least for the
 * case when the lower FS is NFS, to do a sillyrename if the file is in
 * use.  Unfortunately v_usecount is incremented in many places in
 * the kernel and, as such, there may be races that result in
 * the NFS client doing an extraneous silly rename, but that seems
 * preferable to not doing a silly rename when it is needed.
 */
static int
null_remove(struct vop_remove_args *ap)
{
	int retval, vreleit;
	struct vnode *lvp, *vp;

	vp = ap->a_vp;
	if (vrefcnt(vp) > 1) {
		lvp = NULLVPTOLOWERVP(vp);
		VREF(lvp);
		vreleit = 1;
	} else
		vreleit = 0;
	VTONULL(vp)->null_flags |= NULLV_DROP;
	retval = null_bypass(&ap->a_gen);
	if (vreleit != 0)
		vrele(lvp);
	return (retval);
}

/*
 * We handle this to prevent file moves between the null FS and the
 * lower FS.  It is not clear why this is disallowed; possibly it
 * should be permitted.
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *fdvp, *fvp, *tdvp, *tvp;
	struct vnode *lfdvp, *lfvp, *ltdvp, *ltvp;
	struct null_node *fdnn, *fnn, *tdnn, *tnn;
	int error;

	tdvp = ap->a_tdvp;
	fvp = ap->a_fvp;
	fdvp = ap->a_fdvp;
	tvp = ap->a_tvp;
	lfdvp = NULL;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
		error = EXDEV;
		goto upper_err;
	}

	VI_LOCK(fdvp);
	fdnn = VTONULL(fdvp);
	if (fdnn == NULL) {	/* fdvp is not locked, can be doomed */
		VI_UNLOCK(fdvp);
		error = ENOENT;
		goto upper_err;
	}
	lfdvp = fdnn->null_lowervp;
	vref(lfdvp);
	VI_UNLOCK(fdvp);

	VI_LOCK(fvp);
	fnn = VTONULL(fvp);
	if (fnn == NULL) {
		VI_UNLOCK(fvp);
		error = ENOENT;
		goto upper_err;
	}
	lfvp = fnn->null_lowervp;
	vref(lfvp);
	VI_UNLOCK(fvp);

	tdnn = VTONULL(tdvp);
	ltdvp = tdnn->null_lowervp;
	vref(ltdvp);

	if (tvp != NULL) {
		tnn = VTONULL(tvp);
		ltvp = tnn->null_lowervp;
		vref(ltvp);
		tnn->null_flags |= NULLV_DROP;
	} else {
		ltvp = NULL;
	}

	error = VOP_RENAME(lfdvp, lfvp, ap->a_fcnp, ltdvp, ltvp, ap->a_tcnp);
	vrele(fdvp);
	vrele(fvp);
	vrele(tdvp);
	if (tvp != NULL)
		vrele(tvp);
	return (error);

upper_err:
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	if (lfdvp != NULL)
		vrele(lfdvp);
	vrele(fdvp);
	vrele(fvp);
	return (error);
}

static int
null_rmdir(struct vop_rmdir_args *ap)
{

	VTONULL(ap->a_vp)->null_flags |= NULLV_DROP;
	return (null_bypass(&ap->a_gen));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((ap->a_flags & LK_INTERLOCK) == 0)
		VI_LOCK(vp);
	else
		ap->a_flags &= ~LK_INTERLOCK;
	flags = ap->a_flags;
	nn = VTONULL(vp);
	/*
	 * If we're still active we must ask the lower layer to
	 * lock as ffs has special lock considerations in its
	 * vop lock.
	 */
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		/*
		 * We have to hold the vnode here to solve a potential
		 * reclaim race.  If we're forcibly vgone'd while we
		 * still have refs, a thread could be sleeping inside
		 * the lowervp's vop_lock routine.  When we vgone we will
		 * drop our last ref to the lowervp, which would allow it
		 * to be reclaimed.  The lowervp could then be recycled,
		 * in which case it is not legal to be sleeping in its VOP.
		 * We prevent it from being recycled by holding the vnode
		 * here.
		 */
		vholdnz(lvp);
		VI_UNLOCK(vp);
		error = VOP_LOCK(lvp, flags);

		/*
		 * We might have slept to get the lock and someone might
		 * have cleaned our vnode already, switching the vnode
		 * lock from the one in lowervp to the v_lock in our own
		 * vnode structure.  Handle this case by reacquiring the
		 * correct lock in the requested mode.
		 */
		if (VTONULL(vp) == NULL && error == 0) {
			ap->a_flags &= ~LK_TYPE_MASK;
			switch (flags & LK_TYPE_MASK) {
			case LK_SHARED:
				ap->a_flags |= LK_SHARED;
				break;
			case LK_UPGRADE:
			case LK_EXCLUSIVE:
				ap->a_flags |= LK_EXCLUSIVE;
				break;
			default:
				panic("Unsupported lock request %d\n",
				    ap->a_flags);
			}
			VOP_UNLOCK(lvp);
			error = vop_stdlock(ap);
		}
		vdrop(lvp);
	} else {
		VI_UNLOCK(vp);
		error = vop_stdlock(ap);
	}

	return (error);
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	nn = VTONULL(vp);
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		vholdnz(lvp);
		error = VOP_UNLOCK(lvp);
		vdrop(lvp);
	} else {
		error = vop_stdunlock(ap);
	}

	return (error);
}

/*
 * Do not allow the VOP_INACTIVE to be passed to the lower layer,
 * since the reference count on the lower vnode is not related to
 * ours.
 */
static int
null_want_recycle(struct vnode *vp)
{
	struct vnode *lvp;
	struct null_node *xp;
	struct mount *mp;
	struct null_mount *xmp;

	xp = VTONULL(vp);
	lvp = NULLVPTOLOWERVP(vp);
	mp = vp->v_mount;
	xmp = MOUNTTONULLMOUNT(mp);
	if ((xmp->nullm_flags & NULLM_CACHE) == 0 ||
	    (xp->null_flags & NULLV_DROP) != 0 ||
	    (lvp->v_vflag & VV_NOSYNC) != 0) {
		/*
		 * If this is the last reference and caching of the
		 * nullfs vnodes is not enabled, or the lower vnode is
		 * deleted, then free up the vnode so as not to tie up
		 * the lower vnodes.
		 */
		return (1);
	}
	return (0);
}

static int
null_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	if (null_want_recycle(vp)) {
		vp->v_object = NULL;
		vrecycle(vp);
	}
	return (0);
}

static int
null_need_inactive(struct vop_need_inactive_args *ap)
{

	return (null_want_recycle(ap->a_vp) || vn_need_pageq_flush(ap->a_vp));
}

/*
 * Now, the nullfs vnode and, due to the shared lock, the lower
 * vnode, are exclusively locked, and we shall destroy the null vnode.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp;
	struct null_node *xp;
	struct vnode *lowervp;

	vp = ap->a_vp;
	xp = VTONULL(vp);
	lowervp = xp->null_lowervp;

	KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock,
	    ("Reclaiming incomplete null vnode %p", vp));

	null_hashrem(xp);
	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in null_lock().
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_object = NULL;
	vp->v_vnlock = &vp->v_lock;

	/*
	 * If we were opened for write, we leased the write reference
	 * to the lower vnode.  If this is a reclamation due to a
	 * forced unmount, undo the reference now.
	 */
	if (vp->v_writecount > 0)
		VOP_ADD_WRITECOUNT(lowervp, -vp->v_writecount);
	else if (vp->v_writecount < 0)
		vp->v_writecount = 0;

	VI_UNLOCK(vp);

	if ((xp->null_flags & NULLV_NOUNLOCK) != 0)
		vunref(lowervp);
	else
		vput(lowervp);
	free(xp, M_NULLFSNODE);

	return (0);
}

static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp);
	return (0);
}

/* ARGSUSED */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *lowervp;
	struct vnode *vp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp && (lowervp = xp->null_lowervp)) {
		vholdnz(lowervp);
		VI_UNLOCK(vp);
		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
		vdrop(lowervp);
	} else {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
	}
	return (0);
}

static int
null_vptofh(struct vop_vptofh_args *ap)
{
	struct vnode *lvp;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	return (VOP_VPTOFH(lvp, ap->a_fhp));
}

static int
null_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct vnode *lvp, *ldvp;
	struct mount *mp;
	int error, locked;

	locked = VOP_ISLOCKED(vp);
	lvp = NULLVPTOLOWERVP(vp);
	mp = vp->v_mount;
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0)
		return (error);
	vhold(lvp);
	VOP_UNLOCK(vp);	/* vp is held by vn_vptocnp_locked that called us */
	ldvp = lvp;
	vref(lvp);
	error = vn_vptocnp(&ldvp, ap->a_buf, ap->a_buflen);
	vdrop(lvp);
	if (error != 0) {
		vn_lock(vp, locked | LK_RETRY);
		vfs_unbusy(mp);
		return (ENOENT);
	}

	error = vn_lock(ldvp, LK_SHARED);
	if (error != 0) {
		vrele(ldvp);
		vn_lock(vp, locked | LK_RETRY);
		vfs_unbusy(mp);
		return (ENOENT);
	}
	error = null_nodeget(mp, ldvp, dvp);
	if (error == 0) {
#ifdef DIAGNOSTIC
		NULLVPTOLOWERVP(*dvp);
#endif
		VOP_UNLOCK(*dvp);	/* keep reference on *dvp */
	}
	vn_lock(vp, locked | LK_RETRY);
	vfs_unbusy(mp);
	return (error);
}

static int
null_read_pgcache(struct vop_read_pgcache_args *ap)
{
	struct vnode *lvp, *vp;
	struct null_node *xp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp == NULL) {
		VI_UNLOCK(vp);
		return (EJUSTRETURN);
	}
	lvp = xp->null_lowervp;
	vref(lvp);
	VI_UNLOCK(vp);
	error = VOP_READ_PGCACHE(lvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	vrele(lvp);
	return (error);
}

static int
null_advlock(struct vop_advlock_args *ap)
{
	struct vnode *lvp, *vp;
	struct null_node *xp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp == NULL) {
		VI_UNLOCK(vp);
		return (EBADF);
	}
	lvp = xp->null_lowervp;
	vref(lvp);
	VI_UNLOCK(vp);
	error = VOP_ADVLOCK(lvp, ap->a_id, ap->a_op, ap->a_fl, ap->a_flags);
	vrele(lvp);
	return (error);
}

/*
 * Avoid the standard bypass, since the lower dvp and vp could no
 * longer be valid after vput().
 */
static int
null_vput_pair(struct vop_vput_pair_args *ap)
{
	struct mount *mp;
	struct vnode *dvp, *ldvp, *lvp, *vp, *vp1, **vpp;
	int error, res;

	dvp = ap->a_dvp;
	ldvp = NULLVPTOLOWERVP(dvp);
	vref(ldvp);

	vpp = ap->a_vpp;
	vp = NULL;
	lvp = NULL;
	mp = NULL;
	if (vpp != NULL)
		vp = *vpp;
	if (vp != NULL) {
		lvp = NULLVPTOLOWERVP(vp);
		vref(lvp);
		if (!ap->a_unlock_vp) {
			vhold(vp);
			vhold(lvp);
			mp = vp->v_mount;
			vfs_ref(mp);
		}
	}

	res = VOP_VPUT_PAIR(ldvp, lvp != NULL ? &lvp : NULL, true);
	if (vp != NULL && ap->a_unlock_vp)
		vrele(vp);
	vrele(dvp);

	if (vp == NULL || ap->a_unlock_vp)
		return (res);

	/* lvp has been unlocked and vp might be reclaimed */
	VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_data == NULL && vfs_busy(mp, MBF_NOWAIT) == 0) {
		vput(vp);
		vget(lvp, LK_EXCLUSIVE | LK_RETRY);
		if (VN_IS_DOOMED(lvp)) {
			vput(lvp);
			vget(vp, LK_EXCLUSIVE | LK_RETRY);
		} else {
			error = null_nodeget(mp, lvp, &vp1);
			if (error == 0) {
				*vpp = vp1;
			} else {
				vget(vp, LK_EXCLUSIVE | LK_RETRY);
			}
		}
		vfs_unbusy(mp);
	}
	vdrop(lvp);
	vdrop(vp);
	vfs_rel(mp);

	return (res);
}

static int
null_getlowvnode(struct vop_getlowvnode_args *ap)
{
	struct vnode *vp, *vpl;

	vp = ap->a_vp;
	if (vn_lock(vp, LK_SHARED) != 0)
		return (EBADF);

	vpl = NULLVPTOLOWERVP(vp);
	vhold(vpl);
	VOP_UNLOCK(vp);
	VOP_GETLOWVNODE(vpl, ap->a_vplp, ap->a_flags);
	vdrop(vpl);
	return (0);
}

/*
 * Global vfs data structures
 */
struct vop_vector null_vnodeops = {
	.vop_bypass =		null_bypass,
	.vop_access =		null_access,
	.vop_accessx =		null_accessx,
	.vop_advlock =		null_advlock,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_stat =		null_stat,
	.vop_getattr =		null_getattr,
	.vop_getlowvnode =	null_getlowvnode,
	.vop_getwritemount =	null_getwritemount,
	.vop_inactive =		null_inactive,
	.vop_need_inactive =	null_need_inactive,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		null_lock,
	.vop_lookup =		null_lookup,
	.vop_open =		null_open,
	.vop_print =		null_print,
	.vop_read_pgcache =	null_read_pgcache,
	.vop_reclaim =		null_reclaim,
	.vop_remove =		null_remove,
	.vop_rename =		null_rename,
	.vop_rmdir =		null_rmdir,
	.vop_setattr =		null_setattr,
	.vop_strategy =		VOP_EOPNOTSUPP,
	.vop_unlock =		null_unlock,
	.vop_vptocnp =		null_vptocnp,
	.vop_vptofh =		null_vptofh,
	.vop_add_writecount =	null_add_writecount,
	.vop_vput_pair =	null_vput_pair,
	.vop_copy_file_range =	VOP_PANIC,
};
VFS_VOP_VECTOR_REGISTER(null_vnodeops);