/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.1 (Berkeley) 6/10/93
 *
 * $Id: null_vnops.c,v 1.10 1995/12/03 14:54:24 bde Exp $
 */

/*
 * Null Layer
 *
 * (See mount_null(8) for more information.)
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable-layer techniques, and its null-nodes stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
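 *
 * For example, "mount_null /usr/include /dev/layer/null" (the same
 * command used in the walkthrough below) takes /usr/include as
 * target-pn and /dev/layer/null as alias-pn.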
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations,
 * vop_getattr, _inactive, _reclaim, and _print are not bypassed.
 * Vop_getattr must change the fsid being returned.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer-specific data.
 * Vop_print is not bypassed to avoid excessive debugging
 * information.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer, which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to invoke vnode operations directly on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 * (An illustrative sketch of this second technique appears at the
 * end of this file.)
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/nullfs/null.h>

static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");

static int	null_bypass __P((struct vop_generic_args *ap));
static int	null_bwrite __P((struct vop_bwrite_args *ap));
static int	null_getattr __P((struct vop_getattr_args *ap));
static int	null_inactive __P((struct vop_inactive_args *ap));
static int	null_print __P((struct vop_print_args *ap));
static int	null_reclaim __P((struct vop_reclaim_args *ap));
static int	null_strategy __P((struct vop_strategy_args *ap));

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 * Define SAFETY to include some error-checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
static int
null_bypass(ap)
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap;
{
	register struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map.");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
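	 *
	 * The low bits of vdesc_flags are used as a per-vnode mask below:
	 * when bit i is set, the operation is expected to vrele its i-th
	 * vnode argument, so an extra reference is taken (VREF) on the
	 * way down and the matching vrele is done when the arguments are
	 * restored after the call.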
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * is of our type.  Check for and don't map any
		 * that aren't.  (We must always map the first vp or vclean fails.)
		 */
		if (i && (*this_vp_p)->v_op != null_vnodeop_p) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & 1)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & 1)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops return vp's via a vpp,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	}

out:
	return (error);
}


/*
 * We handle getattr only to change the fsid.
 */
static int
null_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	int error;
	error = null_bypass(ap);
	if (error)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}


static int
null_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our null_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	return (0);
}

static int
null_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct null_node *xp = VTONULL(vp);
	struct vnode *lowervp = xp->null_lowervp;

	/*
	 * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
	 * so we can't call VOPs on ourselves.
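	 *
	 * The lower vnode has been held referenced for the entire life
	 * of this null-node (see null_inactive above); the vrele()
	 * below finally drops that reference.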
	 */
	/* After this assignment, this node will not be re-used. */
	xp->null_lowervp = NULL;
	remque(xp);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele (lowervp);
	return (0);
}


static int
null_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	printf ("\ttag VT_NULLFS, vp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
	return (0);
}


/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
static int
null_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_STRATEGY(bp);

	bp->b_vp = savedvp;

	return (error);
}


/*
 * XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
static int
null_bwrite(ap)
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}

/*
 * Global vfs data structures
 */
vop_t **null_vnodeop_p;
static struct vnodeopv_entry_desc null_vnodeop_entries[] = {
	{ &vop_default_desc, (vop_t *)null_bypass },

	{ &vop_getattr_desc, (vop_t *)null_getattr },
	{ &vop_inactive_desc, (vop_t *)null_inactive },
	{ &vop_reclaim_desc, (vop_t *)null_reclaim },
	{ &vop_print_desc, (vop_t *)null_print },

	{ &vop_strategy_desc, (vop_t *)null_strategy },
	{ &vop_bwrite_desc, (vop_t *)null_bwrite },

	{ NULL, NULL }
};
static struct vnodeopv_desc null_vnodeop_opv_desc =
	{ &null_vnodeop_p, null_vnodeop_entries };

VNODEOP_SET(null_vnodeop_opv_desc);
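
/*
 * Illustrative sketch only, not part of null_vnodeop_entries above:
 * it shows the second technique described in the comments at the top
 * of this file, invoking an operation on the lower layer directly
 * through the VOP_*() interface after mapping the vnode argument by
 * hand.  The name "null_access_direct" is hypothetical, and the sketch
 * is kept under "notdef" so it is never compiled in.
 */
#ifdef notdef
static int
null_access_direct(ap)
	struct vop_access_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Map our null-node to the lower vnode it shadows, then invoke
	 * the operation on the lower layer directly.
	 */
	return (VOP_ACCESS(NULLVPTOLOWERVP(ap->a_vp), ap->a_mode,
	    ap->a_cred, ap->a_p));
}
#endif /* notdef */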