/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 *
 * $FreeBSD$
 */

/*
 * Null Layer
 *
 * (See mount_nullfs(8) for more information.)
 *
 * The null layer duplicates a portion of the filesystem
 * name space under a new name.  In this respect, it is
 * similar to the loopback filesystem.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable layer techniques, and its "null-node"s stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback filesystem does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new filesystem layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_nullfs(8).
 * Mount_nullfs(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum filesystem layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
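 *
 * As a rough sketch of the idea (a hypothetical call chain, not
 * literal code), a read on a null-layer vnode travels:
 *
 *	VOP_READ(null-node) -> null_bypass() -> VOP_READ(lower vnode)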
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_nullfs /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new filesystem layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
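 *
 * For instance (an illustrative sketch, not code from this file), an
 * operation could be invoked directly on the mapped lower vnode with:
 *
 *	error = VOP_FSYNC(NULLVPTOLOWERVP(vp), MNT_WAIT, td);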
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/nullfs/null.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
    &null_bug_bypass, 0, "");

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(struct vop_generic_args *ap)
{
        struct vnode **this_vp_p;
        int error;
        struct vnode *old_vps[VDESC_MAX_VPS];
        struct vnode **vps_p[VDESC_MAX_VPS];
        struct vnode ***vppp;
        struct vnodeop_desc *descp = ap->a_desc;
        int reles, i;

        if (null_bug_bypass)
                printf("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
        /*
         * We require at least one vp.
         */
        if (descp->vdesc_vp_offsets == NULL ||
            descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
                panic("null_bypass: no vp's in map");
#endif

        /*
         * Map the vnodes going in.
         * Later, we'll invoke the operation based on
         * the first mapped vnode's operation vector.
         */
        reles = descp->vdesc_flags;
        for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
                if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
                        break;   /* bail out at end of list */
                vps_p[i] = this_vp_p =
                    VOPARG_OFFSETTO(struct vnode **,
                    descp->vdesc_vp_offsets[i], ap);
                /*
                 * We're not guaranteed that any but the first vnode
                 * are of our type.  Check for and don't map any
                 * that aren't.  (We must always map first vp or vclean fails.)
                 */
                if (i && (*this_vp_p == NULLVP ||
                    (*this_vp_p)->v_op != &null_vnodeops)) {
                        old_vps[i] = NULLVP;
                } else {
                        old_vps[i] = *this_vp_p;
                        *(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
                        /*
                         * XXX - Several operations have the side effect
                         * of vrele'ing their vp's.  We must account for
                         * that.  (This should go away in the future.)
                         */
                        if (reles & VDESC_VP0_WILLRELE)
                                VREF(*this_vp_p);
                }
        }

        /*
         * Call the operation on the lower layer
         * with the modified argument structure.
         */
        if (vps_p[0] && *vps_p[0])
                error = VCALL(ap);
        else {
                printf("null_bypass: no map for %s\n", descp->vdesc_name);
                error = EINVAL;
        }

        /*
         * Maintain the illusion of call-by-value
         * by restoring vnodes in the argument structure
         * to their original value.
         */
        reles = descp->vdesc_flags;
        for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
                if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
                        break;   /* bail out at end of list */
                if (old_vps[i]) {
                        *(vps_p[i]) = old_vps[i];
#if 0
                        if (reles & VDESC_VP0_WILLUNLOCK)
                                VOP_UNLOCK(*(vps_p[i]), 0);
#endif
                        if (reles & VDESC_VP0_WILLRELE)
                                vrele(*(vps_p[i]));
                }
        }

        /*
         * Map the possible out-going vpp
         * (Assumes that the lower layer always returns
         * a VREF'ed vpp unless it gets an error.)
         */
        if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
            !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
            !error) {
                /*
                 * XXX - even though some ops have vpp returned vp's,
                 * several ops actually vrele this before returning.
                 * We must avoid these ops.
                 * (This should go away when these ops are regularized.)
                 */
                if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
                        goto out;
                vppp = VOPARG_OFFSETTO(struct vnode ***,
                    descp->vdesc_vpp_offset, ap);
                if (*vppp)
                        error = null_nodeget(old_vps[0]->v_mount, **vppp,
                            *vppp);
        }

out:
        return (error);
}
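
/*
 * A sketch of the first approach described above: a handler that only
 * needs layer-specific pre- or post-processing can let null_bypass()
 * do the argument mapping and the call down.  The handler below is
 * illustrative only (null_example_fsync is not part of nullfs), so it
 * is not compiled.
 */
#if 0
static int
null_example_fsync(struct vop_fsync_args *ap)
{

        /* Layer-specific work would go here. */
        return (null_bypass((struct vop_generic_args *)ap));
}
#endif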

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
        struct componentname *cnp = ap->a_cnp;
        struct vnode *dvp = ap->a_dvp;
        int flags = cnp->cn_flags;
        struct vnode *vp, *ldvp, *lvp;
        int error;

        if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
            (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
                return (EROFS);
        /*
         * Although it is possible to call null_bypass(), we'll do
         * a direct call to reduce overhead.
         */
        ldvp = NULLVPTOLOWERVP(dvp);
        vp = lvp = NULL;
        error = VOP_LOOKUP(ldvp, &lvp, cnp);
        if (error == EJUSTRETURN && (flags & ISLASTCN) &&
            (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
            (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
                error = EROFS;

        if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
                if (ldvp == lvp) {
                        *ap->a_vpp = dvp;
                        VREF(dvp);
                        vrele(lvp);
                } else {
                        error = null_nodeget(dvp->v_mount, lvp, &vp);
                        if (error == 0)
                                *ap->a_vpp = vp;
                }
        }
        return (error);
}

static int
null_open(struct vop_open_args *ap)
{
        int retval;
        struct vnode *vp, *ldvp;

        vp = ap->a_vp;
        ldvp = NULLVPTOLOWERVP(vp);
        retval = null_bypass(&ap->a_gen);
        if (retval == 0)
                vp->v_object = ldvp->v_object;
        return (retval);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct vattr *vap = ap->a_vap;

        if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
            vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
            vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
            (vp->v_mount->mnt_flag & MNT_RDONLY))
                return (EROFS);
        if (vap->va_size != VNOVAL) {
                switch (vp->v_type) {
                case VDIR:
                        return (EISDIR);
                case VCHR:
                case VBLK:
                case VSOCK:
                case VFIFO:
                        if (vap->va_flags != VNOVAL)
                                return (EOPNOTSUPP);
                        return (0);
                case VREG:
                case VLNK:
                default:
                        /*
                         * Disallow write attempts if the filesystem is
                         * mounted read-only.
                         */
                        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                                return (EROFS);
                }
        }

        return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle getattr only to change the fsid.
 */
static int
null_getattr(struct vop_getattr_args *ap)
{
        int error;

        if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
                return (error);

        ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
        return (0);
}

/*
 * We handle this to disallow write access if mounted read-only.
 */
static int
null_access(struct vop_access_args *ap)
{
        struct vnode *vp = ap->a_vp;
        accmode_t accmode = ap->a_accmode;

        /*
         * Disallow write attempts on read-only layers,
         * unless the file is a socket, fifo, or a block or
         * character device resident on the filesystem.
         */
        if (accmode & VWRITE) {
                switch (vp->v_type) {
                case VDIR:
                case VLNK:
                case VREG:
                        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                                return (EROFS);
                        break;
                default:
                        break;
                }
        }
        return (null_bypass((struct vop_generic_args *)ap));
}

static int
null_accessx(struct vop_accessx_args *ap)
{
        struct vnode *vp = ap->a_vp;
        accmode_t accmode = ap->a_accmode;

        /*
         * Disallow write attempts on read-only layers,
         * unless the file is a socket, fifo, or a block or
         * character device resident on the filesystem.
         */
        if (accmode & VWRITE) {
                switch (vp->v_type) {
                case VDIR:
                case VLNK:
                case VREG:
                        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                                return (EROFS);
                        break;
                default:
                        break;
                }
        }
        return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * Increasing the refcount of the lower vnode is needed at least for the
 * case when the lower FS is NFS, to do a sillyrename if the file is in
 * use.  Unfortunately v_usecount is incremented in many places in
 * the kernel and, as such, there may be races that result in
 * the NFS client doing an extraneous silly rename, but that seems
 * preferable to not doing a silly rename when it is needed.
 */
static int
null_remove(struct vop_remove_args *ap)
{
        int retval, vreleit;
        struct vnode *lvp;

        if (vrefcnt(ap->a_vp) > 1) {
                lvp = NULLVPTOLOWERVP(ap->a_vp);
                VREF(lvp);
                vreleit = 1;
        } else
                vreleit = 0;
        retval = null_bypass(&ap->a_gen);
        if (vreleit != 0)
                vrele(lvp);
        return (retval);
}

/*
 * We handle this to eliminate null FS to lower FS
 * file moving.  We don't know why this is disallowed;
 * possibly it should be permitted.
 */
static int
null_rename(struct vop_rename_args *ap)
{
        struct vnode *tdvp = ap->a_tdvp;
        struct vnode *fvp = ap->a_fvp;
        struct vnode *fdvp = ap->a_fdvp;
        struct vnode *tvp = ap->a_tvp;

        /* Check for cross-device rename. */
        if ((fvp->v_mount != tdvp->v_mount) ||
            (tvp && (fvp->v_mount != tvp->v_mount))) {
                if (tdvp == tvp)
                        vrele(tdvp);
                else
                        vput(tdvp);
                if (tvp)
                        vput(tvp);
                vrele(fdvp);
                vrele(fvp);
                return (EXDEV);
        }

        return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
        struct vnode *vp = ap->a_vp;
        int flags = ap->a_flags;
        struct null_node *nn;
        struct vnode *lvp;
        int error;

        if ((flags & LK_INTERLOCK) == 0) {
                VI_LOCK(vp);
                ap->a_flags = flags |= LK_INTERLOCK;
        }
        nn = VTONULL(vp);
        /*
         * If we're still active we must ask the lower layer to
         * lock as ffs has special lock considerations in its
         * vop lock.
         */
        if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
                VI_LOCK_FLAGS(lvp, MTX_DUPOK);
                VI_UNLOCK(vp);
                /*
                 * We have to hold the vnode here to solve a potential
                 * reclaim race.  If we're forcibly vgone'd while we
                 * still have refs, a thread could be sleeping inside
                 * the lowervp's vop_lock routine.  When we vgone we will
                 * drop our last ref to the lowervp, which would allow it
                 * to be reclaimed.  The lowervp could then be recycled,
                 * in which case it is not legal to be sleeping in its VOP.
                 * We prevent it from being recycled by holding the vnode
                 * here.
                 */
                vholdl(lvp);
                error = VOP_LOCK(lvp, flags);

                /*
                 * We might have slept to get the lock and someone might
                 * have cleaned our vnode already, switching the vnode lock
                 * from the one in lowervp to v_lock in our own vnode
                 * structure.  Handle this case by reacquiring the correct
                 * lock in the requested mode.
                 */
                if (VTONULL(vp) == NULL && error == 0) {
                        ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
                        switch (flags & LK_TYPE_MASK) {
                        case LK_SHARED:
                                ap->a_flags |= LK_SHARED;
                                break;
                        case LK_UPGRADE:
                        case LK_EXCLUSIVE:
                                ap->a_flags |= LK_EXCLUSIVE;
                                break;
                        default:
                                panic("Unsupported lock request %d\n",
                                    ap->a_flags);
                        }
                        VOP_UNLOCK(lvp, 0);
                        error = vop_stdlock(ap);
                }
                vdrop(lvp);
        } else
                error = vop_stdlock(ap);

        return (error);
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
        struct vnode *vp = ap->a_vp;
        int flags = ap->a_flags;
        int mtxlkflag = 0;
        struct null_node *nn;
        struct vnode *lvp;
        int error;

        if ((flags & LK_INTERLOCK) != 0)
                mtxlkflag = 1;
        else if (mtx_owned(VI_MTX(vp)) == 0) {
                VI_LOCK(vp);
                mtxlkflag = 2;
        }
        nn = VTONULL(vp);
        if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
                VI_LOCK_FLAGS(lvp, MTX_DUPOK);
                flags |= LK_INTERLOCK;
                vholdl(lvp);
                VI_UNLOCK(vp);
                error = VOP_UNLOCK(lvp, flags);
                vdrop(lvp);
                if (mtxlkflag == 0)
                        VI_LOCK(vp);
        } else {
                if (mtxlkflag == 2)
                        VI_UNLOCK(vp);
                error = vop_stdunlock(ap);
        }

        return (error);
}

/*
 * There is no way to tell that someone issued a remove/rmdir operation
 * on the underlying filesystem.  For now we just have to release lowervp
 * as soon as possible.
 *
 * Note, we can't release any resources nor remove the vnode from the hash
 * before the appropriate VXLOCK stuff is done, because another process can
 * find this vnode in the hash during inactivation and may be sitting in
 * vget() waiting for null_inactive to unlock the vnode.  Thus we defer all
 * of that to VOP_RECLAIM.
 */
static int
null_inactive(struct vop_inactive_args *ap)
{
        struct vnode *vp = ap->a_vp;

        vp->v_object = NULL;

        /*
         * If this is the last reference, then free up the vnode
         * so as not to tie up the lower vnodes.
         */
        vrecycle(vp);

        return (0);
}

/*
 * Now, the VXLOCK is in force and we're free to destroy the null vnode.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
        struct vnode *vp;
        struct null_node *xp;
        struct vnode *lowervp;

        vp = ap->a_vp;
        xp = VTONULL(vp);
        lowervp = xp->null_lowervp;

        KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock,
            ("Reclaiming incomplete null vnode %p", vp));

        null_hashrem(xp);
        /*
         * Use the interlock to protect the clearing of v_data to
         * prevent faults in null_lock().
         */
        lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
        VI_LOCK(vp);
        vp->v_data = NULL;
        vp->v_object = NULL;
        vp->v_vnlock = &vp->v_lock;
        VI_UNLOCK(vp);
        vput(lowervp);
        free(xp, M_NULLFSNODE);

        return (0);
}

static int
null_print(struct vop_print_args *ap)
{
        struct vnode *vp = ap->a_vp;

        printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp);
        return (0);
}

/* ARGSUSED */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
        struct null_node *xp;
        struct vnode *lowervp;
        struct vnode *vp;

        vp = ap->a_vp;
        VI_LOCK(vp);
        xp = VTONULL(vp);
        if (xp && (lowervp = xp->null_lowervp)) {
                VI_LOCK_FLAGS(lowervp, MTX_DUPOK);
                VI_UNLOCK(vp);
                vholdl(lowervp);
                VI_UNLOCK(lowervp);
                VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
                vdrop(lowervp);
        } else {
                VI_UNLOCK(vp);
                *(ap->a_mpp) = NULL;
        }
        return (0);
}

static int
null_vptofh(struct vop_vptofh_args *ap)
{
        struct vnode *lvp;

        lvp = NULLVPTOLOWERVP(ap->a_vp);
        return (VOP_VPTOFH(lvp, ap->a_fhp));
}

static int
null_vptocnp(struct vop_vptocnp_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct vnode **dvp = ap->a_vpp;
        struct vnode *lvp, *ldvp;
        struct ucred *cred = ap->a_cred;
        int error, locked;

        if (vp->v_type == VDIR)
                return (vop_stdvptocnp(ap));

        locked = VOP_ISLOCKED(vp);
        lvp = NULLVPTOLOWERVP(vp);
        vhold(lvp);
        VOP_UNLOCK(vp, 0); /* vp is held by vn_vptocnp_locked that called us */
        ldvp = lvp;
        vref(lvp);
        error = vn_vptocnp(&ldvp, cred, ap->a_buf, ap->a_buflen);
        vdrop(lvp);
        if (error != 0) {
                vn_lock(vp, locked | LK_RETRY);
                return (ENOENT);
        }

        /*
         * An exclusive lock is required by the insmntque1 call in
         * null_nodeget().
         */
        error = vn_lock(ldvp, LK_EXCLUSIVE);
        if (error != 0) {
                vrele(ldvp);
                vn_lock(vp, locked | LK_RETRY);
                return (ENOENT);
        }
        vref(ldvp);
        error = null_nodeget(vp->v_mount, ldvp, dvp);
        if (error == 0) {
#ifdef DIAGNOSTIC
                NULLVPTOLOWERVP(*dvp);
#endif
                VOP_UNLOCK(*dvp, 0); /* keep reference on *dvp */
        }
        vn_lock(vp, locked | LK_RETRY);
        return (error);
}

/*
 * Global vfs data structures
 */
struct vop_vector null_vnodeops = {
        .vop_bypass =           null_bypass,
        .vop_access =           null_access,
        .vop_accessx =          null_accessx,
        .vop_advlockpurge =     vop_stdadvlockpurge,
        .vop_bmap =             VOP_EOPNOTSUPP,
        .vop_getattr =          null_getattr,
        .vop_getwritemount =    null_getwritemount,
        .vop_inactive =         null_inactive,
        .vop_islocked =         vop_stdislocked,
        .vop_lock1 =            null_lock,
        .vop_lookup =           null_lookup,
        .vop_open =             null_open,
        .vop_print =            null_print,
        .vop_reclaim =          null_reclaim,
        .vop_remove =           null_remove,
        .vop_rename =           null_rename,
        .vop_setattr =          null_setattr,
        .vop_strategy =         VOP_EOPNOTSUPP,
        .vop_unlock =           null_unlock,
        .vop_vptocnp =          null_vptocnp,
        .vop_vptofh =           null_vptofh,
};
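
/*
 * A sketch of the "prototype layer" use described at the top of this
 * file: a derived layer keeps the bypass routine as its default and
 * overrides only the operations that need layer-specific work.  The
 * vector below is hypothetical (example_vnodeops is not part of
 * nullfs, and a real derived layer would use its own renamed copies
 * of these handlers), so it is not compiled.
 */
#if 0
struct vop_vector example_vnodeops = {
        .vop_bypass =           null_bypass,    /* pass everything else down */
        .vop_getattr =          null_getattr,   /* e.g., fix up the fsid */
        .vop_lock1 =            null_lock,      /* layer-aware locking */
        .vop_unlock =           null_unlock,
        .vop_inactive =         null_inactive,  /* free layer-specific data */
        .vop_reclaim =          null_reclaim,
};
#endif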