/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.1 (Berkeley) 6/10/93
 *
 * $Id: null_vnops.c,v 1.2 1994/08/02 07:45:06 davidg Exp $
 */

/*
 * Null Layer
 *
 * (See mount_null(8) for more information.)
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects: it is implemented using
 * a stackable layers technique, and its "null-nodes" stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this man page examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
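 *
 * For example, after the command (also used in the examples below)
 *
 *	mount_null /usr/include /dev/layer/null
 *
 * the pathname /dev/layer/null/sys/param.h names the same file as
 * /usr/include/sys/param.h.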
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations,
 * vop_getattr, _inactive, _reclaim, and _print are not bypassed.
 * Vop_getattr must change the fsid being returned.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.
 * Vop_print is not bypassed to avoid excessive debugging
 * information.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
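 *
 * As a purely illustrative sketch (the operation name "foo" is
 * hypothetical), an operation handled with this approach has the form:
 *
 *	int
 *	null_foo(ap)
 *		struct vop_foo_args *ap;
 *	{
 *		int error;
 *
 *		if (error = null_bypass(ap))
 *			return (error);
 *		... adjust any results that must differ in this layer ...
 *		return (0);
 *	}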
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 * Null_strategy and null_bwrite below use this approach.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/nullfs/null.h>


int null_bug_bypass = 0;	/* for debugging: enables bypass printf'ing */

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 * Define SAFETY to include some error checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(ap)
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap;
{
	register struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map.\n");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p)->v_op != null_vnodeop_p) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
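			/*
			 * Taking an extra reference on the lower vnode here
			 * keeps the lower operation's vrele from consuming
			 * the reference which the null-node itself holds on
			 * the lower vnode; the release the caller expects is
			 * done on the upper vnode after the call returns
			 * (see the unmapping loop below).
			 */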
			if (reles & 1)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & 1)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	}

 out:
	return (error);
}


/*
 * We handle getattr only to change the fsid.
 */
int
null_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	int error;
	if (error = null_bypass(ap))
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}


int
null_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our null_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	return (0);
}

int
null_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct null_node *xp = VTONULL(vp);
	struct vnode *lowervp = xp->null_lowervp;

	/*
	 * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
	 * so we can't call VOPs on ourself.
	 */
	/* After this assignment, this node will not be re-used. */
	xp->null_lowervp = NULL;
	remque(xp);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele (lowervp);
	return (0);
}


int
null_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	printf ("\ttag VT_NULLFS, vp=%x, lowervp=%x\n", vp, NULLVPTOLOWERVP(vp));
	return (0);
}


/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
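 * For now, the buffer's b_vp is pointed at the lower vnode for the
 * duration of the call and restored afterwards.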
 */
int
null_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_STRATEGY(bp);

	bp->b_vp = savedvp;

	return (error);
}


/*
 * XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
null_bwrite(ap)
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}

/*
 * Global vfs data structures
 */
int (**null_vnodeop_p)();
struct vnodeopv_entry_desc null_vnodeop_entries[] = {
	{ &vop_default_desc, null_bypass },

	{ &vop_getattr_desc, null_getattr },
	{ &vop_inactive_desc, null_inactive },
	{ &vop_reclaim_desc, null_reclaim },
	{ &vop_print_desc, null_print },

	{ &vop_strategy_desc, null_strategy },
	{ &vop_bwrite_desc, null_bwrite },

	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc null_vnodeop_opv_desc =
	{ &null_vnodeop_p, null_vnodeop_entries };