/*-
 * Copyright (c) 2001 Dag-Erling Coïdan Smørgrav
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/pseudofs/pseudofs.h>
#include <fs/pseudofs/pseudofs_internal.h>

static MALLOC_DEFINE(M_PFSVNCACHE, "pfs_vncache", "pseudofs vnode cache");

static struct mtx pfs_vncache_mutex;
static struct pfs_vdata *pfs_vncache;
static void pfs_exit(struct proc *p);

SYSCTL_NODE(_vfs_pfs, OID_AUTO, vncache, CTLFLAG_RW, 0,
    "pseudofs vnode cache");

static int pfs_vncache_entries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, entries, CTLFLAG_RD,
    &pfs_vncache_entries, 0,
    "number of entries in the vnode cache");

static int pfs_vncache_maxentries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, maxentries, CTLFLAG_RD,
    &pfs_vncache_maxentries, 0,
    "highest number of entries in the vnode cache");

static int pfs_vncache_hits;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, hits, CTLFLAG_RD,
    &pfs_vncache_hits, 0,
    "number of cache hits since initialization");

static int pfs_vncache_misses;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, misses, CTLFLAG_RD,
    &pfs_vncache_misses, 0,
    "number of cache misses since initialization");

extern vop_t **pfs_vnodeop_p;

/*
 * Initialize vnode cache
 */
void
pfs_vncache_load(void)
{
	mtx_init(&pfs_vncache_mutex, "pseudofs_vncache", NULL,
	    MTX_DEF | MTX_RECURSE);
	/* XXX at_exit() can fail with ENOMEM */
	at_exit(pfs_exit);
}

/*
 * Tear down vnode cache
 */
void
pfs_vncache_unload(void)
{
	rm_at_exit(pfs_exit);
	if (pfs_vncache_entries != 0)
		printf("pfs_vncache_unload(): %d entries remaining\n",
		    pfs_vncache_entries);
	mtx_destroy(&pfs_vncache_mutex);
}

/*
 * Allocate a vnode
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
    struct pfs_node *pn, pid_t pid)
{
	struct pfs_vdata *pvd;
	int error;

	/*
	 * See if the vnode is in the cache.
	 * XXX linear search is not very efficient.
	 */
	mtx_lock(&pfs_vncache_mutex);
	for (pvd = pfs_vncache; pvd; pvd = pvd->pvd_next) {
		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
		    pvd->pvd_vnode->v_mount == mp) {
			if (vget(pvd->pvd_vnode, 0, curthread) == 0) {
				++pfs_vncache_hits;
				*vpp = pvd->pvd_vnode;
				mtx_unlock(&pfs_vncache_mutex);
				/* XXX see comment at top of pfs_lookup() */
				cache_purge(*vpp);
				vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE,
				    curthread);
				return (0);
			}
			/* XXX if this can happen, we're in trouble */
			break;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);
	++pfs_vncache_misses;

	/* nope, get a new one */
	MALLOC(pvd, struct pfs_vdata *, sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
	if (++pfs_vncache_entries > pfs_vncache_maxentries)
		pfs_vncache_maxentries = pfs_vncache_entries;
	error = getnewvnode("pseudofs", mp, pfs_vnodeop_p, vpp);
	if (error) {
		FREE(pvd, M_PFSVNCACHE);
		return (error);
	}
	pvd->pvd_pn = pn;
	pvd->pvd_pid = pid;
	(*vpp)->v_data = pvd;
	switch (pn->pn_type) {
	case pfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
#if 0
		printf("root vnode allocated\n");
#endif
		/* fall through */
	case pfstype_dir:
	case pfstype_this:
	case pfstype_parent:
	case pfstype_procdir:
		(*vpp)->v_type = VDIR;
		break;
	case pfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case pfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case pfstype_none:
		KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
	}
	/*
	 * Propagate flag through to vnode so users know it can change
	 * if the process changes (i.e. execve)
	 */
	if ((pn->pn_flags & PFS_PROCDEP) != 0)
		(*vpp)->v_vflag |= VV_PROCDEP;
	pvd->pvd_vnode = *vpp;
	mtx_lock(&pfs_vncache_mutex);
	pvd->pvd_prev = NULL;
	pvd->pvd_next = pfs_vncache;
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd;
	pfs_vncache = pvd;
	mtx_unlock(&pfs_vncache_mutex);
	(*vpp)->v_vnlock->lk_flags |= LK_CANRECURSE;
	vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE, curthread);
	return (0);
}

/*
 * Free a vnode
 */
int
pfs_vncache_free(struct vnode *vp)
{
	struct pfs_vdata *pvd;

	cache_purge(vp);

	mtx_lock(&pfs_vncache_mutex);
	pvd = (struct pfs_vdata *)vp->v_data;
	KASSERT(pvd != NULL, ("pfs_vncache_free(): no vnode data\n"));
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd->pvd_prev;
	if (pvd->pvd_prev)
		pvd->pvd_prev->pvd_next = pvd->pvd_next;
	else
		pfs_vncache = pvd->pvd_next;
	mtx_unlock(&pfs_vncache_mutex);

	--pfs_vncache_entries;
	FREE(pvd, M_PFSVNCACHE);
	vp->v_data = NULL;
	return (0);
}

/*
 * Free all vnodes associated with a defunct process
 */
static void
pfs_exit(struct proc *p)
{
	struct pfs_vdata *pvd, *prev;

	mtx_lock(&pfs_vncache_mutex);
	/*
	 * The double loop is necessary because vgone() indirectly
	 * calls pfs_vncache_free() which frees pvd, so we have to
	 * backtrace one step every time we free a vnode.
	 */
	/* XXX linear search... not very efficient */
	for (pvd = pfs_vncache; pvd != NULL; pvd = pvd->pvd_next) {
		while (pvd != NULL && pvd->pvd_pid == p->p_pid) {
			prev = pvd->pvd_prev;
			vgone(pvd->pvd_vnode);
			pvd = prev ? prev->pvd_next : pfs_vncache;
		}
		if (pvd == NULL)
			break;
	}
	mtx_unlock(&pfs_vncache_mutex);
}

/*
 * Disable a pseudofs node, and free all vnodes associated with it
 */
int
pfs_disable(struct pfs_node *pn)
{
	struct pfs_vdata *pvd, *prev;

	if (pn->pn_flags & PFS_DISABLED)
		return (0);
	mtx_lock(&pfs_vncache_mutex);
	pn->pn_flags |= PFS_DISABLED;
	/* see the comment about the double loop in pfs_exit() */
	/* XXX linear search... not very efficient */
	for (pvd = pfs_vncache; pvd != NULL; pvd = pvd->pvd_next) {
		while (pvd != NULL && pvd->pvd_pn == pn) {
			prev = pvd->pvd_prev;
			vgone(pvd->pvd_vnode);
			pvd = prev ? prev->pvd_next : pfs_vncache;
		}
		if (pvd == NULL)
			break;
	}
	mtx_unlock(&pfs_vncache_mutex);
	return (0);
}

/*
 * Re-enable a disabled pseudofs node
 */
int
pfs_enable(struct pfs_node *pn)
{
	pn->pn_flags &= ~PFS_DISABLED;
	return (0);
}