/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_subr.c	8.7 (Berkeley) 5/14/95
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <fs/nullfs/null.h>

#define	LOG2_SIZEVNODE	8		/* log2(sizeof struct vnode) */
#define	NNULLNODECACHE	16

/*
 * Null layer cache:
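 * The cache is a hash table (null_node_hashtbl), protected by
 * null_hashmtx and keyed on the address of the lower vnode
 * (see NULL_NHASH() below).
 *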
 * Each cache entry holds a reference to the lower vnode
 * along with a pointer to the alias vnode.  When an
 * entry is added the lower vnode is VREF'd.  When the
 * alias is removed the lower vnode is vrele'd.
 */

#define	NULL_NHASH(vp) \
	(&null_node_hashtbl[(((uintptr_t)vp)>>LOG2_SIZEVNODE) & null_node_hash])

static LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
static u_long null_node_hash;
struct mtx null_hashmtx;

static MALLOC_DEFINE(M_NULLFSHASH, "NULLFS hash", "NULLFS hash table");
MALLOC_DEFINE(M_NULLFSNODE, "NULLFS node", "NULLFS vnode private part");

static struct vnode * null_hashget(struct mount *, struct vnode *);
static struct vnode * null_hashins(struct mount *, struct null_node *);

/*
 * Initialise cache headers
 */
int
nullfs_init(vfsp)
	struct vfsconf *vfsp;
{

	NULLFSDEBUG("nullfs_init\n");		/* printed during system boot */
	null_node_hashtbl = hashinit(NNULLNODECACHE, M_NULLFSHASH, &null_node_hash);
	mtx_init(&null_hashmtx, "nullhs", NULL, MTX_DEF);
	return (0);
}

int
nullfs_uninit(vfsp)
	struct vfsconf *vfsp;
{

	mtx_destroy(&null_hashmtx);
	free(null_node_hashtbl, M_NULLFSHASH);
	return (0);
}

/*
 * Return a VREF'ed alias for the lower vnode if one already exists,
 * else NULLVP.
 * The lower vnode should be locked on entry and will be left locked on exit.
 */
static struct vnode *
null_hashget(mp, lowervp)
	struct mount *mp;
	struct vnode *lowervp;
{
	struct thread *td = curthread;	/* XXX */
	struct null_node_hashhead *hd;
	struct null_node *a;
	struct vnode *vp;

	/*
	 * Find the hash base, then search the (two-way) linked
	 * list looking for a null_node structure which is referencing
	 * the lower vnode.  If found, increment the null_node
	 * reference count (but NOT the lower vnode's VREF counter).
	 */
	hd = NULL_NHASH(lowervp);
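	/*
	 * The chain is scanned with null_hashmtx held; the mutex must be
	 * dropped before calling vget(), so on a vget() failure we restart
	 * from "loop" because the chain may have changed in the meantime.
	 */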
loop:
	mtx_lock(&null_hashmtx);
	LIST_FOREACH(a, hd, null_hash) {
		if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
			vp = NULLTOV(a);
			mtx_lock(&vp->v_interlock);
			/*
			 * Don't block if nullfs vnode is being recycled.
			 * We already hold a lock on the lower vnode, thus
			 * waiting might deadlock against the thread
			 * recycling the nullfs vnode or another thread
			 * in vrele() waiting for the vnode lock.
			 */
			if ((vp->v_iflag & VI_XLOCK) != 0) {
				VI_UNLOCK(vp);
				continue;
			}
			mtx_unlock(&null_hashmtx);
			/*
			 * We need vget for the VXLOCK
			 * stuff, but we don't want to lock
			 * the lower node.
			 */
			if (vget(vp, LK_EXCLUSIVE | LK_THISLAYER | LK_INTERLOCK, td))
				goto loop;

			return (vp);
		}
	}
	mtx_unlock(&null_hashmtx);
	return (NULLVP);
}

/*
 * Act like null_hashget, but add the passed null_node to the hash if no
 * existing node is found.
 */
static struct vnode *
null_hashins(mp, xp)
	struct mount *mp;
	struct null_node *xp;
{
	struct thread *td = curthread;	/* XXX */
	struct null_node_hashhead *hd;
	struct null_node *oxp;
	struct vnode *ovp;

	hd = NULL_NHASH(xp->null_lowervp);
loop:
	mtx_lock(&null_hashmtx);
	LIST_FOREACH(oxp, hd, null_hash) {
		if (oxp->null_lowervp == xp->null_lowervp &&
		    NULLTOV(oxp)->v_mount == mp) {
			ovp = NULLTOV(oxp);
			mtx_lock(&ovp->v_interlock);
			/*
			 * Don't block if nullfs vnode is being recycled.
			 * We already hold a lock on the lower vnode, thus
			 * waiting might deadlock against the thread
			 * recycling the nullfs vnode or another thread
			 * in vrele() waiting for the vnode lock.
			 */
			if ((ovp->v_iflag & VI_XLOCK) != 0) {
				VI_UNLOCK(ovp);
				continue;
			}
			mtx_unlock(&null_hashmtx);
			if (vget(ovp, LK_EXCLUSIVE | LK_THISLAYER | LK_INTERLOCK, td))
				goto loop;

			return (ovp);
		}
	}
	LIST_INSERT_HEAD(hd, xp, null_hash);
	mtx_unlock(&null_hashmtx);
	return (NULLVP);
}

/*
 * Make a new nullfs node, or get an existing one.
 * Vp is the alias vnode, lowervp is the lower vnode.
 *
 * The lowervp is assumed to be locked and to carry a "spare" reference.
 * This routine vrele()s lowervp if the nullfs node was taken from the
 * hash; otherwise it "transfers" the caller's "spare" reference to the
 * created nullfs vnode.
 */
int
null_nodeget(mp, lowervp, vpp)
	struct mount *mp;
	struct vnode *lowervp;
	struct vnode **vpp;
{
	struct thread *td = curthread;	/* XXX */
	struct null_node *xp;
	struct vnode *vp;
	int error;

	/* Look up the hash first. */
	*vpp = null_hashget(mp, lowervp);
	if (*vpp != NULL) {
		vrele(lowervp);
		return (0);
	}

	/*
	 * We do not serialize vnode creation; instead we will check for
	 * duplicates later, when adding the new vnode to the hash.
	 *
	 * Note that a duplicate can only appear in the hash if the lowervp
	 * is locked LK_SHARED.
	 */

	/*
	 * Do the MALLOC before the getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if MALLOC should block.
	 */
	MALLOC(xp, struct null_node *, sizeof(struct null_node),
	    M_NULLFSNODE, M_WAITOK);

	error = getnewvnode("null", mp, &null_vnodeops, &vp);
	if (error) {
		FREE(xp, M_NULLFSNODE);
		return (error);
	}

	xp->null_vnode = vp;
	xp->null_lowervp = lowervp;
	xp->null_pending_locks = 0;
	xp->null_drain_wakeup = 0;

	vp->v_type = lowervp->v_type;
	vp->v_data = xp;

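	/*
	 * The new vnode is not yet in the hash, so it cannot be found by
	 * null_hashget(); it only becomes visible to other threads once
	 * null_hashins() adds it below.
	 */
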
	/*
	 * From NetBSD:
	 * Now lock the new node.  We rely on the fact that we were passed
	 * a locked vnode.  If the lower node is exporting a struct lock
	 * (v_vnlock != NULL) then we just set the upper v_vnlock to the
	 * lower one, and both are now locked.  If the lower node is exporting
	 * NULL, then we copy that up and manually lock the new vnode.
	 */

	vp->v_vnlock = lowervp->v_vnlock;
	error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_THISLAYER, td);
	if (error)
		panic("null_nodeget: can't lock new vnode\n");

	/*
	 * Atomically insert our new node into the hash, or vget the existing
	 * one if someone else has beaten us to it.
	 */
	*vpp = null_hashins(mp, xp);
	if (*vpp != NULL) {
		vrele(lowervp);
		VOP_UNLOCK(vp, LK_THISLAYER, td);
		vp->v_vnlock = NULL;
		xp->null_lowervp = NULL;
		vrele(vp);
		return (0);
	}

	/*
	 * XXX We take an extra vref just to work around UFS's XXX:
	 * UFS can vrele() a vnode in VOP_CLOSE() in some cases.  Luckily,
	 * this can only happen if v_usecount == 1.  To work around it, we
	 * just don't let v_usecount be 1; it will be 2 or more.
	 */
	VREF(lowervp);

	*vpp = vp;

	return (0);
}

/*
 * Remove node from hash.
 */
void
null_hashrem(xp)
	struct null_node *xp;
{

	mtx_lock(&null_hashmtx);
	LIST_REMOVE(xp, null_hash);
	mtx_unlock(&null_hashmtx);
}

#ifdef DIAGNOSTIC

#ifdef KDB
#define	null_checkvp_barrier	1
#else
#define	null_checkvp_barrier	0
#endif

struct vnode *
null_checkvp(vp, fil, lno)
	struct vnode *vp;
	char *fil;
	int lno;
{
	struct null_node *a = VTONULL(vp);
#ifdef notyet
	/*
	 * Can't do this check because vop_reclaim runs
	 * with a funny vop vector.
	 */
	if (vp->v_op != null_vnodeop_p) {
		printf ("null_checkvp: on non-null-node\n");
		while (null_checkvp_barrier) /*WAIT*/ ;
		panic("null_checkvp");
	};
#endif
	if (a->null_lowervp == NULLVP) {
		/* Should never happen */
		int i; u_long *p;
		printf("vp = %p, ZERO ptr\n", (void *)vp);
		for (p = (u_long *) a, i = 0; i < 8; i++)
			printf(" %lx", p[i]);
		printf("\n");
		/* wait for debugger */
		while (null_checkvp_barrier) /*WAIT*/ ;
		panic("null_checkvp");
	}
	if (vrefcnt(a->null_lowervp) < 1) {
		int i; u_long *p;
		printf("vp = %p, unref'ed lowervp\n", (void *)vp);
		for (p = (u_long *) a, i = 0; i < 8; i++)
			printf(" %lx", p[i]);
		printf("\n");
		/* wait for debugger */
		while (null_checkvp_barrier) /*WAIT*/ ;
		panic ("null with unref'ed lowervp");
	};
#ifdef notyet
	printf("null %x/%d -> %x/%d [%s, %d]\n",
	    NULLTOV(a), vrefcnt(NULLTOV(a)),
	    a->null_lowervp, vrefcnt(a->null_lowervp),
	    fil, lno);
#endif
	return a->null_lowervp;
}
#endif