/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.1 (Berkeley) 6/10/93
 *
 * $Id: null_vnops.c,v 1.5 1994/09/21 23:22:41 wollman Exp $
 */

/*
 * Null Layer
 *
 * (See mount_null(8) for more information.)
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable layer techniques, and its "null-nodes" stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this man page examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of target-pn subtree will be aliased under alias-pn.
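 *
 * At the system call level this boils down to a single mount(2) call.
 * A rough sketch follows, with target-pn = /usr/include and alias-pn =
 * /dev/layer/null; the MOUNT_NULL type constant and the layout of
 * struct null_args (a single "target" pathname member, declared in
 * <miscfs/nullfs/null.h>) are assumptions about this vintage of the
 * interface rather than guarantees:
 *
 *	struct null_args args;
 *
 *	args.target = "/usr/include";
 *	if (mount(MOUNT_NULL, "/dev/layer/null", 0, (caddr_t)&args) < 0)
 *		perror("mount");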
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations,
 * vop_getattr, _inactive, _reclaim, and _print are not bypassed.
 * Vop_getattr must change the fsid being returned.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.
 * Vop_print is not bypassed to avoid excessive debugging
 * information.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
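 *
 * For instance, a layer built from this template could sync the file
 * below it by calling the lower vnode directly.  The sketch below is
 * only illustrative; the argument order assumes the vop_fsync_args
 * layout of this vintage of the vnode interface (vnode, credentials,
 * wait flag, process):
 *
 *	error = VOP_FSYNC(NULLVPTOLOWERVP(vp), cred, MNT_WAIT, p);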
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/nullfs/null.h>


int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 * Define SAFETY to include some error checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(ap)
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap;
{
	register struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map.\n");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
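	/*
	 * The low-order bits of vdesc_flags are the per-vp WILLRELE
	 * flags (VDESC_VP0_WILLRELE and friends); shifting "reles"
	 * right once per iteration keeps the flag for the current
	 * vp in bit 0.
	 */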
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p)->v_op != null_vnodeop_p) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & 1)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & 1)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	}

out:
	return (error);
}


/*
 * We handle getattr only to change the fsid.
 */
int
null_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	int error;
	error = null_bypass(ap);
	if (error)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}


int
null_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our null_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	return (0);
}

int
null_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct null_node *xp = VTONULL(vp);
	struct vnode *lowervp = xp->null_lowervp;

	/*
	 * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
	 * so we can't call VOPs on ourself.
	 */
	/* After this assignment, this node will not be re-used. */
	xp->null_lowervp = NULL;
	remque(xp);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele (lowervp);
	return (0);
}


int
null_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	printf ("\ttag VT_NULLFS, vp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
	return (0);
}


/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
null_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_STRATEGY(bp);

	bp->b_vp = savedvp;

	return (error);
}


/*
 * XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
null_bwrite(ap)
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}

/*
 * Global vfs data structures
 */
int (**null_vnodeop_p)();
struct vnodeopv_entry_desc null_vnodeop_entries[] = {
	{ &vop_default_desc, null_bypass },

	{ &vop_getattr_desc, null_getattr },
	{ &vop_inactive_desc, null_inactive },
	{ &vop_reclaim_desc, null_reclaim },
	{ &vop_print_desc, null_print },

	{ &vop_strategy_desc, null_strategy },
	{ &vop_bwrite_desc, null_bwrite },

	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc null_vnodeop_opv_desc =
	{ &null_vnodeop_p, null_vnodeop_entries };

VNODEOP_SET(null_vnodeop_opv_desc);
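
/*
 * A layer cloned from this one (see CREATING OTHER FILE SYSTEM LAYERS
 * above) ends up declaring a table of the same shape, overriding only
 * the entries it cares about and letting everything else fall through
 * to its copy of the bypass routine.  A hypothetical sketch, with
 * "newfs" standing in for the new layer's name:
 *
 *	int (**newfs_vnodeop_p)();
 *	struct vnodeopv_entry_desc newfs_vnodeop_entries[] = {
 *		{ &vop_default_desc, newfs_bypass },
 *		{ &vop_getattr_desc, newfs_getattr },
 *		{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
 *	};
 *	struct vnodeopv_desc newfs_vnodeop_opv_desc =
 *		{ &newfs_vnodeop_p, newfs_vnodeop_entries };
 *
 *	VNODEOP_SET(newfs_vnodeop_opv_desc);
 */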