/*-
 * Copyright (c) 2001 Dag-Erling Coïdan Smørgrav
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_pseudofs.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/pseudofs/pseudofs.h>
#include <fs/pseudofs/pseudofs_internal.h>

static MALLOC_DEFINE(M_PFSVNCACHE, "pfs_vncache", "pseudofs vnode cache");

static struct mtx pfs_vncache_mutex;
static struct pfs_vdata *pfs_vncache;
static eventhandler_tag pfs_exit_tag;
static void pfs_exit(void *arg, struct proc *p);
static void pfs_purge_locked(struct pfs_node *pn, bool force);

static SYSCTL_NODE(_vfs_pfs, OID_AUTO, vncache, CTLFLAG_RW, 0,
    "pseudofs vnode cache");

static int pfs_vncache_entries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, entries, CTLFLAG_RD,
    &pfs_vncache_entries, 0,
    "number of entries in the vnode cache");

static int pfs_vncache_maxentries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, maxentries, CTLFLAG_RD,
    &pfs_vncache_maxentries, 0,
    "highest number of entries in the vnode cache");

static int pfs_vncache_hits;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, hits, CTLFLAG_RD,
    &pfs_vncache_hits, 0,
    "number of cache hits since initialization");

static int pfs_vncache_misses;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, misses, CTLFLAG_RD,
    &pfs_vncache_misses, 0,
    "number of cache misses since initialization");

extern struct vop_vector pfs_vnodeops;	/* XXX -> .h file */

/*
 * Initialize vnode cache
 */
void
pfs_vncache_load(void)
{

	mtx_init(&pfs_vncache_mutex, "pfs_vncache", NULL, MTX_DEF);
	pfs_exit_tag = EVENTHANDLER_REGISTER(process_exit, pfs_exit, NULL,
	    EVENTHANDLER_PRI_ANY);
}

/*
 * Tear down vnode cache
 */
void
pfs_vncache_unload(void)
{

	EVENTHANDLER_DEREGISTER(process_exit, pfs_exit_tag);
	mtx_lock(&pfs_vncache_mutex);
	pfs_purge_locked(NULL, true);
	mtx_unlock(&pfs_vncache_mutex);
	KASSERT(pfs_vncache_entries == 0,
	    ("%d vncache entries remaining", pfs_vncache_entries));
	mtx_destroy(&pfs_vncache_mutex);
}

/*
 * Allocate a vnode
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
    struct pfs_node *pn, pid_t pid)
{
	struct pfs_vdata *pvd, *pvd2;
	struct vnode *vp;
	int error;

	/*
	 * See if the vnode is in the cache.
	 * XXX linear search is not very efficient.
	 */
retry:
	mtx_lock(&pfs_vncache_mutex);
	for (pvd = pfs_vncache; pvd; pvd = pvd->pvd_next) {
		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
		    pvd->pvd_vnode->v_mount == mp) {
			vp = pvd->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				*vpp = vp;
				/*
				 * Some callers cache_enter(vp) later, so
				 * we have to make sure it's not in the
				 * VFS cache so it doesn't get entered
				 * twice.  A better solution would be to
				 * make pfs_vncache_alloc() responsible
				 * for entering the vnode in the VFS
				 * cache.
				 */
				cache_purge(vp);
				return (0);
			}
			goto retry;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);

	/* nope, get a new one */
	pvd = malloc(sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
	pvd->pvd_next = pvd->pvd_prev = NULL;
	error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp);
	if (error) {
		free(pvd, M_PFSVNCACHE);
		return (error);
	}
	pvd->pvd_pn = pn;
	pvd->pvd_pid = pid;
	(*vpp)->v_data = pvd;
	switch (pn->pn_type) {
	case pfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
#if 0
		printf("root vnode allocated\n");
#endif
		/* fall through */
	case pfstype_dir:
	case pfstype_this:
	case pfstype_parent:
	case pfstype_procdir:
		(*vpp)->v_type = VDIR;
		break;
	case pfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case pfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case pfstype_none:
		KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
	}
	/*
	 * Propagate flag through to vnode so users know it can change
	 * if the process changes (i.e. execve)
	 */
	if ((pn->pn_flags & PFS_PROCDEP) != 0)
		(*vpp)->v_vflag |= VV_PROCDEP;
	pvd->pvd_vnode = *vpp;
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	VN_LOCK_AREC(*vpp);
	error = insmntque(*vpp, mp);
	if (error != 0) {
		free(pvd, M_PFSVNCACHE);
		*vpp = NULLVP;
		return (error);
	}
retry2:
	mtx_lock(&pfs_vncache_mutex);
	/*
	 * Other thread may race with us, creating the entry we are
	 * going to insert into the cache.  Recheck after
	 * pfs_vncache_mutex is reacquired.
	 */
	for (pvd2 = pfs_vncache; pvd2; pvd2 = pvd2->pvd_next) {
		if (pvd2->pvd_pn == pn && pvd2->pvd_pid == pid &&
		    pvd2->pvd_vnode->v_mount == mp) {
			vp = pvd2->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				vgone(*vpp);
				vput(*vpp);
				*vpp = vp;
				cache_purge(vp);
				return (0);
			}
			goto retry2;
		}
	}
	++pfs_vncache_misses;
	if (++pfs_vncache_entries > pfs_vncache_maxentries)
		pfs_vncache_maxentries = pfs_vncache_entries;
	pvd->pvd_prev = NULL;
	pvd->pvd_next = pfs_vncache;
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd;
	pfs_vncache = pvd;
	mtx_unlock(&pfs_vncache_mutex);
	return (0);
}

/*
 * Free a vnode
 */
int
pfs_vncache_free(struct vnode *vp)
{
	struct pfs_vdata *pvd;

	mtx_lock(&pfs_vncache_mutex);
	pvd = (struct pfs_vdata *)vp->v_data;
	KASSERT(pvd != NULL, ("pfs_vncache_free(): no vnode data\n"));
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd->pvd_prev;
	if (pvd->pvd_prev) {
		pvd->pvd_prev->pvd_next = pvd->pvd_next;
		--pfs_vncache_entries;
	} else if (pfs_vncache == pvd) {
		pfs_vncache = pvd->pvd_next;
		--pfs_vncache_entries;
	}
	mtx_unlock(&pfs_vncache_mutex);

	free(pvd, M_PFSVNCACHE);
	vp->v_data = NULL;
	return (0);
}

/*
 * Purge the cache of dead entries
 *
 * This is extremely inefficient due to the fact that vgone() not only
 * indirectly modifies the vnode cache, but may also sleep.  We can
 * neither hold pfs_vncache_mutex across a vgone() call, nor make any
 * assumptions about the state of the cache after vgone() returns.  In
 * consequence, we must start over after every vgone() call, and keep
 * trying until we manage to traverse the entire cache.
 *
 * The only way to improve this situation is to change the data structure
 * used to implement the cache.
 */
static void
pfs_purge_locked(struct pfs_node *pn, bool force)
{
	struct pfs_vdata *pvd;
	struct vnode *vnp;

	mtx_assert(&pfs_vncache_mutex, MA_OWNED);
	pvd = pfs_vncache;
	while (pvd != NULL) {
		if (force || pvd->pvd_dead ||
		    (pn != NULL && pvd->pvd_pn == pn)) {
			vnp = pvd->pvd_vnode;
			vhold(vnp);
			mtx_unlock(&pfs_vncache_mutex);
			VOP_LOCK(vnp, LK_EXCLUSIVE);
			vgone(vnp);
			VOP_UNLOCK(vnp, 0);
			mtx_lock(&pfs_vncache_mutex);
			vdrop(vnp);
			pvd = pfs_vncache;
		} else {
			pvd = pvd->pvd_next;
		}
	}
}

void
pfs_purge(struct pfs_node *pn)
{

	mtx_lock(&pfs_vncache_mutex);
	pfs_purge_locked(pn, false);
	mtx_unlock(&pfs_vncache_mutex);
}

/*
 * Free all vnodes associated with a defunct process
 */
static void
pfs_exit(void *arg, struct proc *p)
{
	struct pfs_vdata *pvd;
	int dead;

	if (pfs_vncache == NULL)
		return;
	mtx_lock(&pfs_vncache_mutex);
	for (pvd = pfs_vncache, dead = 0; pvd != NULL; pvd = pvd->pvd_next)
		if (pvd->pvd_pid == p->p_pid)
			dead = pvd->pvd_dead = 1;
	if (dead)
		pfs_purge_locked(NULL, false);
	mtx_unlock(&pfs_vncache_mutex);
}
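
/*
 * Illustrative sketch only, not part of the original file: the comment
 * above pfs_purge_locked() notes that the only real fix for the linear
 * scan and the restart-after-vgone() behaviour is a different data
 * structure.  One plausible direction, shown here purely as an example,
 * is to key the cache on (node, pid) hash buckets built from the
 * queue(3) LIST macros, so that lookups and purges touch a single short
 * bucket instead of walking one global list.  All names below
 * (pfs_vdata_hashed, pfs_vncache_head, pfs_vncache_hash, ...) are
 * hypothetical and do not exist in pseudofs; <sys/queue.h> is assumed
 * to be available.
 */
#define	PFS_VNCACHE_HASHSIZE	128		/* must be a power of two */

struct pfs_vdata_hashed {
	struct pfs_node	*pvd_pn;		/* backing pseudofs node */
	pid_t		 pvd_pid;		/* process the vnode refers to */
	struct vnode	*pvd_vnode;		/* cached vnode */
	int		 pvd_dead;		/* marked for purging */
	LIST_ENTRY(pfs_vdata_hashed) pvd_hash;	/* bucket linkage */
};

static LIST_HEAD(, pfs_vdata_hashed) pfs_vncache_head[PFS_VNCACHE_HASHSIZE];

/* Map a (node, pid) pair to a bucket index. */
static u_int
pfs_vncache_hash(struct pfs_node *pn, pid_t pid)
{

	return ((((uintptr_t)pn >> 4) ^ (u_int)pid) &
	    (PFS_VNCACHE_HASHSIZE - 1));
}

/* Look up a cached vnode; only one bucket is scanned. */
static struct pfs_vdata_hashed *
pfs_vncache_lookup(struct mount *mp, struct pfs_node *pn, pid_t pid)
{
	struct pfs_vdata_hashed *pvd;

	mtx_assert(&pfs_vncache_mutex, MA_OWNED);
	LIST_FOREACH(pvd, &pfs_vncache_head[pfs_vncache_hash(pn, pid)],
	    pvd_hash) {
		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
		    pvd->pvd_vnode->v_mount == mp)
			return (pvd);
	}
	return (NULL);
}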