/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 *
 * $FreeBSD$
 */

/*
 * Null Layer
 *
 * (See mount_nullfs(8) for more information.)
 *
 * The null layer duplicates a portion of the filesystem
 * name space under a new name.  In this respect, it is
 * similar to the loopback filesystem.  It differs from
 * the loopback fs in two respects: it is implemented using
 * the stackable-layers technique, and its "null-nodes" stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback filesystem does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new filesystem layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_nullfs(8).
 * Mount_nullfs(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
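 *
 * For example, the illustrative invocation (any alias directory would
 * serve equally well)
 *
 *	mount_nullfs /usr/include /mnt/includes
 *
 * makes the subtree rooted at the target-pn /usr/include appear a
 * second time under the alias-pn /mnt/includes.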
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimal filesystem layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer-specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise, all intermediate vnode layers
 * (such as union, umapfs, etc.) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_nullfs /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new filesystem layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables, as in the sketch below.
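 *
 * For instance (a hypothetical new layer named "example"), the bulk
 * of the renaming could be done with:
 *
 *	sed -e 's/null_/example_/g' -e 's/nullfs/examplefs/g' \
 *	    null_vnops.c > example_vnops.c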
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 */
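
/*
 * As an illustrative sketch of the second approach (the function and
 * its name are hypothetical, not part of this file), a layer could map
 * the vnode argument by hand and invoke the lower layer directly:
 *
 *	static int
 *	example_getattr(struct vop_getattr_args *ap)
 *	{
 *		struct vnode *lvp;
 *
 *		lvp = NULLVPTOLOWERVP(ap->a_vp);
 *		return (VOP_GETATTR(lvp, ap->a_vap, ap->a_cred));
 *	}
 *
 * The null_getattr() below instead takes the first approach and routes
 * the operation through null_bypass().
 */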

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/stat.h>

#include <fs/nullfs/null.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static int null_bug_bypass = 0;	/* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");
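
/*
 * The knob above is exported as debug.nullfs_bug_bypass and can be
 * toggled at run time, e.g. (from a shell, as an illustration):
 *
 *	sysctl debug.nullfs_bug_bypass=1
 *
 * after which the name of every operation passing through
 * null_bypass() is printed to the console.
 */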

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnode *lvp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode **,
		    descp->vdesc_vp_offsets[i], ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map the first vp
		 * or vclean fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != &null_vnodeops)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	if (vps_p[0] && *vps_p[0])
		error = VCALL(ap);
	else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			lvp = *(vps_p[i]);

			/*
			 * If lowervp was unlocked during the VOP
			 * operation, the nullfs upper vnode could have
			 * been reclaimed, which changes its v_vnlock
			 * back to the private v_lock.  In this case we
			 * must move lock ownership from the lower to
			 * the upper (reclaimed) vnode.
			 */
			if (lvp != NULLVP &&
			    VOP_ISLOCKED(lvp) == LK_EXCLUSIVE &&
			    old_vps[i]->v_vnlock != lvp->v_vnlock) {
				VOP_UNLOCK(lvp);
				VOP_LOCK(old_vps[i], LK_EXCLUSIVE | LK_RETRY);
			}

			*(vps_p[i]) = old_vps[i];
#if 0
			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]), 0);
#endif
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		vppp = VOPARG_OFFSETTO(struct vnode ***,
		    descp->vdesc_vpp_offset, ap);
		if (*vppp)
			error = null_nodeget(old_vps[0]->v_mount, **vppp,
			    *vppp);
	}

	return (error);
}

static int
null_add_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *lvp, *vp;
	int error;

	vp = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);
	VI_LOCK(vp);
	/* text refs are bypassed to lowervp */
	VNASSERT(vp->v_writecount >= 0, vp, ("wrong null writecount"));
	VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
	    ("wrong writecount inc %d", ap->a_inc));
	error = VOP_ADD_WRITECOUNT(lvp, ap->a_inc);
	if (error == 0)
		vp->v_writecount += ap->a_inc;
	VI_UNLOCK(vp);
	return (error);
}

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	int flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	struct mount *mp;
	int error;

	mp = dvp->v_mount;
	if ((flags & ISLASTCN) != 0 && (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	/*
	 * Although it is possible to call null_bypass(), we make
	 * a direct call here to reduce overhead.
	 */
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;

	/*
	 * Renames in the lower mounts might create an inconsistent
	 * configuration where the lower vnode is moved out of the
	 * directory tree remounted by our null mount.  Do not try to
	 * handle this in a fancy way; just avoid VOP_LOOKUP() with a
	 * DOTDOT name, which cannot be handled by the VOP, at least
	 * when passing over the lower root.
	 */
	if ((ldvp->v_vflag & VV_ROOT) != 0 && (flags & ISDOTDOT) != 0) {
		KASSERT((dvp->v_vflag & VV_ROOT) == 0,
		    ("ldvp %p fl %#x dvp %p fl %#x flags %#x",
		    ldvp, ldvp->v_vflag, dvp, dvp->v_vflag, flags));
		return (ENOENT);
	}

	/*
	 * Hold ldvp.  The reference on it, owned by dvp, is lost in
	 * case of dvp reclamation, and we need ldvp to move our lock
	 * from ldvp to dvp.
	 */
	vhold(ldvp);

	error = VOP_LOOKUP(ldvp, &lvp, cnp);

	/*
	 * VOP_LOOKUP() on the lower vnode may unlock ldvp, which allows
	 * dvp to be reclaimed due to the shared v_vnlock.  Check for the
	 * doomed state and return an error.
	 */
	if ((error == 0 || error == EJUSTRETURN) &&
	    VN_IS_DOOMED(dvp)) {
		error = ENOENT;
		if (lvp != NULL)
			vput(lvp);

		/*
		 * If vgone() did reclaim dvp before curthread
		 * relocked ldvp, the locks of dvp and ldvp are no
		 * longer shared.  In this case, the relock of ldvp in
		 * the lower fs VOP_LOOKUP() does not restore the
		 * locking state of dvp.  Compensate for this by
		 * unlocking ldvp and locking dvp, which is also
		 * correct if the locks are still shared.
		 */
		VOP_UNLOCK(ldvp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	}
	vdrop(ldvp);

	if (error == EJUSTRETURN && (flags & ISLASTCN) != 0 &&
	    (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = null_nodeget(mp, lvp, &vp);
			if (error == 0)
				*ap->a_vpp = vp;
		}
	}
	return (error);
}

static int
null_open(struct vop_open_args *ap)
{
	int retval;
	struct vnode *vp, *ldvp;

	vp = ap->a_vp;
	ldvp = NULLVPTOLOWERVP(vp);
	retval = null_bypass(&ap->a_gen);
	if (retval == 0) {
		vp->v_object = ldvp->v_object;
		if ((vn_irflag_read(ldvp) & VIRF_PGREAD) != 0) {
			MPASS(vp->v_object != NULL);
			if ((vn_irflag_read(vp) & VIRF_PGREAD) == 0) {
				vn_irflag_set_cond(vp, VIRF_PGREAD);
			}
		}
	}
	return (retval);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle stat and getattr only to change the fsid.
 */
static int
null_stat(struct vop_stat_args *ap)
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_sb->st_dev = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

static int
null_getattr(struct vop_getattr_args *ap)
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * Handled to disallow write access if mounted read-only.
 */
static int
null_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

static int
null_accessx(struct vop_accessx_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * Increasing the refcount of the lower vnode is needed at least for the
 * case when the lower FS is NFS: a sillyrename must be done if the file
 * is in use.  Unfortunately, v_usecount is incremented in many places
 * in the kernel and, as such, there may be races that result in the
 * NFS client doing an extraneous silly rename, but that seems
 * preferable to not doing a silly rename when it is needed.
 */
static int
null_remove(struct vop_remove_args *ap)
{
	int retval, vreleit;
	struct vnode *lvp, *vp;

	vp = ap->a_vp;
	if (vrefcnt(vp) > 1) {
		lvp = NULLVPTOLOWERVP(vp);
		VREF(lvp);
		vreleit = 1;
	} else
		vreleit = 0;
	VTONULL(vp)->null_flags |= NULLV_DROP;
	retval = null_bypass(&ap->a_gen);
	if (vreleit != 0)
		vrele(lvp);
	return (retval);
}

/*
 * We handle this to disallow moving files from the null FS into the
 * lower FS.  It is not known why this is disallowed; possibly it
 * should be permitted.
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp = ap->a_tvp;
	struct null_node *tnn;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		vrele(fdvp);
		vrele(fvp);
		return (EXDEV);
	}

	if (tvp != NULL) {
		tnn = VTONULL(tvp);
		tnn->null_flags |= NULLV_DROP;
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

static int
null_rmdir(struct vop_rmdir_args *ap)
{

	VTONULL(ap->a_vp)->null_flags |= NULLV_DROP;
	return (null_bypass(&ap->a_gen));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((ap->a_flags & LK_INTERLOCK) == 0)
		VI_LOCK(vp);
	else
		ap->a_flags &= ~LK_INTERLOCK;
	flags = ap->a_flags;
	nn = VTONULL(vp);
	/*
	 * If we're still active we must ask the lower layer to
	 * lock, as ffs has special lock considerations in its
	 * vop lock.
	 */
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		/*
		 * We have to hold the vnode here to solve a potential
		 * reclaim race.  If we're forcibly vgone'd while we
		 * still have refs, a thread could be sleeping inside
		 * the lowervp's vop_lock routine.  When we are vgone'd
		 * we drop our last ref to the lowervp, which would
		 * allow it to be reclaimed.  The lowervp could then be
		 * recycled, in which case it is not legal to be
		 * sleeping in its VOP.  We prevent it from being
		 * recycled by holding the vnode here.
		 */
		vholdnz(lvp);
		VI_UNLOCK(vp);
		error = VOP_LOCK(lvp, flags);

		/*
		 * We might have slept to get the lock and someone
		 * might have cleaned our vnode already, switching the
		 * vnode lock from the one in lowervp to the v_lock in
		 * our own vnode structure.  Handle this case by
		 * reacquiring the correct lock in the requested mode.
		 */
		if (VTONULL(vp) == NULL && error == 0) {
			ap->a_flags &= ~LK_TYPE_MASK;
			switch (flags & LK_TYPE_MASK) {
			case LK_SHARED:
				ap->a_flags |= LK_SHARED;
				break;
			case LK_UPGRADE:
			case LK_EXCLUSIVE:
				ap->a_flags |= LK_EXCLUSIVE;
				break;
			default:
				panic("Unsupported lock request %d\n",
				    ap->a_flags);
			}
			VOP_UNLOCK(lvp);
			error = vop_stdlock(ap);
		}
		vdrop(lvp);
	} else {
		VI_UNLOCK(vp);
		error = vop_stdlock(ap);
	}

	return (error);
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	nn = VTONULL(vp);
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		vholdnz(lvp);
		error = VOP_UNLOCK(lvp);
		vdrop(lvp);
	} else {
		error = vop_stdunlock(ap);
	}

	return (error);
}

/*
 * Do not allow the VOP_INACTIVE to be passed to the lower layer,
 * since the reference count on the lower vnode is not related to
 * ours.
 */
static int
null_want_recycle(struct vnode *vp)
{
	struct vnode *lvp;
	struct null_node *xp;
	struct mount *mp;
	struct null_mount *xmp;

	xp = VTONULL(vp);
	lvp = NULLVPTOLOWERVP(vp);
	mp = vp->v_mount;
	xmp = MOUNTTONULLMOUNT(mp);
	if ((xmp->nullm_flags & NULLM_CACHE) == 0 ||
	    (xp->null_flags & NULLV_DROP) != 0 ||
	    (lvp->v_vflag & VV_NOSYNC) != 0) {
		/*
		 * If this is the last reference and caching of the
		 * nullfs vnodes is not enabled, or the lower vnode is
		 * deleted, then free up the vnode so as not to tie up
		 * the lower vnodes.
		 */
		return (1);
	}
	return (0);
}

static int
null_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	if (null_want_recycle(vp)) {
		vp->v_object = NULL;
		vrecycle(vp);
	}
	return (0);
}

static int
null_need_inactive(struct vop_need_inactive_args *ap)
{

	return (null_want_recycle(ap->a_vp) || vn_need_pageq_flush(ap->a_vp));
}

/*
 * Now, the nullfs vnode and, due to the sharing lock, the lower
 * vnode, are exclusively locked, and we shall destroy the null vnode.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp;
	struct null_node *xp;
	struct vnode *lowervp;

	vp = ap->a_vp;
	xp = VTONULL(vp);
	lowervp = xp->null_lowervp;

	KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock,
	    ("Reclaiming incomplete null vnode %p", vp));

	null_hashrem(xp);
	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in null_lock().
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_object = NULL;
	vp->v_vnlock = &vp->v_lock;

	/*
	 * If we were opened for write, we leased the write reference
	 * to the lower vnode.  If this is a reclamation due to the
	 * forced unmount, undo the reference now.
	 */
	if (vp->v_writecount > 0)
		VOP_ADD_WRITECOUNT(lowervp, -vp->v_writecount);
	else if (vp->v_writecount < 0)
		vp->v_writecount = 0;

	VI_UNLOCK(vp);

	if ((xp->null_flags & NULLV_NOUNLOCK) != 0)
		vunref(lowervp);
	else
		vput(lowervp);
	free(xp, M_NULLFSNODE);

	return (0);
}

static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp);
	return (0);
}

/* ARGSUSED */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *lowervp;
	struct vnode *vp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp && (lowervp = xp->null_lowervp)) {
		vholdnz(lowervp);
		VI_UNLOCK(vp);
		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
		vdrop(lowervp);
	} else {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
	}
	return (0);
}

static int
null_vptofh(struct vop_vptofh_args *ap)
{
	struct vnode *lvp;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	return (VOP_VPTOFH(lvp, ap->a_fhp));
}

static int
null_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct vnode *lvp, *ldvp;
	struct mount *mp;
	int error, locked;

	locked = VOP_ISLOCKED(vp);
	lvp = NULLVPTOLOWERVP(vp);
	vhold(lvp);
	mp = vp->v_mount;
	vfs_ref(mp);
	VOP_UNLOCK(vp); /* vp is held by vn_vptocnp_locked that called us */
	ldvp = lvp;
	vref(lvp);
	error = vn_vptocnp(&ldvp, ap->a_buf, ap->a_buflen);
	vdrop(lvp);
	if (error != 0) {
		vn_lock(vp, locked | LK_RETRY);
		vfs_rel(mp);
		return (ENOENT);
	}

	error = vn_lock(ldvp, LK_SHARED);
	if (error != 0) {
		vrele(ldvp);
		vn_lock(vp, locked | LK_RETRY);
		vfs_rel(mp);
		return (ENOENT);
	}
	error = null_nodeget(mp, ldvp, dvp);
	if (error == 0) {
#ifdef DIAGNOSTIC
		NULLVPTOLOWERVP(*dvp);
#endif
		VOP_UNLOCK(*dvp); /* keep reference on *dvp */
	}
	vn_lock(vp, locked | LK_RETRY);
	vfs_rel(mp);
	return (error);
}

static int
null_read_pgcache(struct vop_read_pgcache_args *ap)
{
	struct vnode *lvp, *vp;
	struct null_node *xp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp == NULL) {
		VI_UNLOCK(vp);
		return (EJUSTRETURN);
	}
	lvp = xp->null_lowervp;
	vref(lvp);
	VI_UNLOCK(vp);
	error = VOP_READ_PGCACHE(lvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	vrele(lvp);
	return (error);
}

/*
 * Avoid the standard bypass, since the lower dvp and vp may no longer
 * be valid after vput().
 */
static int
null_vput_pair(struct vop_vput_pair_args *ap)
{
	struct mount *mp;
	struct vnode *dvp, *ldvp, *lvp, *vp, *vp1, **vpp;
	int error, res;

	dvp = ap->a_dvp;
	ldvp = NULLVPTOLOWERVP(dvp);
	vref(ldvp);

	vpp = ap->a_vpp;
	vp = NULL;
	lvp = NULL;
	mp = NULL;
	if (vpp != NULL)
		vp = *vpp;
	if (vp != NULL) {
		lvp = NULLVPTOLOWERVP(vp);
		vref(lvp);
		if (!ap->a_unlock_vp) {
			vhold(vp);
			vhold(lvp);
			mp = vp->v_mount;
			vfs_ref(mp);
		}
	}

	res = VOP_VPUT_PAIR(ldvp, lvp != NULL ? &lvp : NULL, true);
	if (vp != NULL && ap->a_unlock_vp)
		vrele(vp);
	vrele(dvp);

	if (vp == NULL || ap->a_unlock_vp)
		return (res);

	/* lvp has been unlocked and vp might be reclaimed */
	VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_data == NULL && vfs_busy(mp, MBF_NOWAIT) == 0) {
		vput(vp);
		vget(lvp, LK_EXCLUSIVE | LK_RETRY);
		if (VN_IS_DOOMED(lvp)) {
			vput(lvp);
			vget(vp, LK_EXCLUSIVE | LK_RETRY);
		} else {
			error = null_nodeget(mp, lvp, &vp1);
			if (error == 0) {
				*vpp = vp1;
			} else {
				vget(vp, LK_EXCLUSIVE | LK_RETRY);
			}
		}
		vfs_unbusy(mp);
	}
	vdrop(lvp);
	vdrop(vp);
	vfs_rel(mp);

	return (res);
}

/*
 * Global vfs data structures
 */
struct vop_vector null_vnodeops = {
	.vop_bypass =		null_bypass,
	.vop_access =		null_access,
	.vop_accessx =		null_accessx,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_stat =		null_stat,
	.vop_getattr =		null_getattr,
	.vop_getwritemount =	null_getwritemount,
	.vop_inactive =		null_inactive,
	.vop_need_inactive =	null_need_inactive,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		null_lock,
	.vop_lookup =		null_lookup,
	.vop_open =		null_open,
	.vop_print =		null_print,
	.vop_read_pgcache =	null_read_pgcache,
	.vop_reclaim =		null_reclaim,
	.vop_remove =		null_remove,
	.vop_rename =		null_rename,
	.vop_rmdir =		null_rmdir,
	.vop_setattr =		null_setattr,
	.vop_strategy =		VOP_EOPNOTSUPP,
	.vop_unlock =		null_unlock,
	.vop_vptocnp =		null_vptocnp,
	.vop_vptofh =		null_vptofh,
	.vop_add_writecount =	null_add_writecount,
	.vop_vput_pair =	null_vput_pair,
};
VFS_VOP_VECTOR_REGISTER(null_vnodeops);
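
/*
 * Any operation without an explicit entry in null_vnodeops above is
 * dispatched through .vop_bypass.  A layer derived from the null layer
 * would provide a vector of the same shape; as a hypothetical sketch
 * (the "example" names are not part of this file):
 *
 *	struct vop_vector example_vnodeops = {
 *		.vop_bypass =	example_bypass,
 *		.vop_getattr =	example_getattr,
 *		...
 *	};
 *	VFS_VOP_VECTOR_REGISTER(example_vnodeops);
 */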