/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001 Dag-Erling Coïdan Smørgrav
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_pseudofs.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/pseudofs/pseudofs.h>
#include <fs/pseudofs/pseudofs_internal.h>

static MALLOC_DEFINE(M_PFSVNCACHE, "pfs_vncache", "pseudofs vnode cache");

static struct mtx pfs_vncache_mutex;
static eventhandler_tag pfs_exit_tag;
static void pfs_exit(void *arg, struct proc *p);
static void pfs_purge_all(void);

static SYSCTL_NODE(_vfs_pfs, OID_AUTO, vncache, CTLFLAG_RW, 0,
    "pseudofs vnode cache");

static int pfs_vncache_entries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, entries, CTLFLAG_RD,
    &pfs_vncache_entries, 0,
    "number of entries in the vnode cache");

static int pfs_vncache_maxentries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, maxentries, CTLFLAG_RD,
    &pfs_vncache_maxentries, 0,
    "highest number of entries in the vnode cache");

static int pfs_vncache_hits;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, hits, CTLFLAG_RD,
    &pfs_vncache_hits, 0,
    "number of cache hits since initialization");

static int pfs_vncache_misses;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, misses, CTLFLAG_RD,
    &pfs_vncache_misses, 0,
    "number of cache misses since initialization");

extern struct vop_vector pfs_vnodeops;	/* XXX -> .h file */

static SLIST_HEAD(pfs_vncache_head, pfs_vdata) *pfs_vncache_hashtbl;
static u_long pfs_vncache_hash;
#define PFS_VNCACHE_HASH(pid)	(&pfs_vncache_hashtbl[(pid) & pfs_vncache_hash])
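
/*
 * Cache layout, for reference: each cache entry is a pfs_vdata that ties
 * a vnode (pvd_vnode) to the pfs_node and pid it was created for
 * (pvd_pn, pvd_pid).  Entries sit on singly-linked hash chains indexed
 * by pid; lookups match on (node, pid, mount).  The hash table, the
 * chains and the entry counters are all protected by pfs_vncache_mutex.
 */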

/*
 * Initialize vnode cache
 */
void
pfs_vncache_load(void)
{

	mtx_init(&pfs_vncache_mutex, "pfs_vncache", NULL, MTX_DEF);
	pfs_vncache_hashtbl = hashinit(maxproc / 4, M_PFSVNCACHE,
	    &pfs_vncache_hash);
	pfs_exit_tag = EVENTHANDLER_REGISTER(process_exit, pfs_exit, NULL,
	    EVENTHANDLER_PRI_ANY);
}

/*
 * Tear down vnode cache
 */
void
pfs_vncache_unload(void)
{

	EVENTHANDLER_DEREGISTER(process_exit, pfs_exit_tag);
	pfs_purge_all();
	KASSERT(pfs_vncache_entries == 0,
	    ("%d vncache entries remaining", pfs_vncache_entries));
	mtx_destroy(&pfs_vncache_mutex);
}

/*
 * Allocate a vnode
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
    struct pfs_node *pn, pid_t pid)
{
	struct pfs_vncache_head *hash;
	struct pfs_vdata *pvd, *pvd2;
	struct vnode *vp;
	int error;

	/*
	 * See if the vnode is in the cache.
	 */
	hash = PFS_VNCACHE_HASH(pid);
	if (SLIST_EMPTY(hash))
		goto alloc;
retry:
	mtx_lock(&pfs_vncache_mutex);
	SLIST_FOREACH(pvd, hash, pvd_hash) {
		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
		    pvd->pvd_vnode->v_mount == mp) {
			vp = pvd->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				*vpp = vp;
				/*
				 * Some callers cache_enter(vp) later, so
				 * we have to make sure it's not in the
				 * VFS cache so it doesn't get entered
				 * twice.  A better solution would be to
				 * make pfs_vncache_alloc() responsible
				 * for entering the vnode in the VFS
				 * cache.
				 */
				cache_purge(vp);
				return (0);
			}
			goto retry;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);
alloc:
	/* nope, get a new one */
	pvd = malloc(sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
	error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp);
	if (error) {
		free(pvd, M_PFSVNCACHE);
		return (error);
	}
	pvd->pvd_pn = pn;
	pvd->pvd_pid = pid;
	(*vpp)->v_data = pvd;
	switch (pn->pn_type) {
	case pfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
#if 0
		printf("root vnode allocated\n");
#endif
		/* fall through */
	case pfstype_dir:
	case pfstype_this:
	case pfstype_parent:
	case pfstype_procdir:
		(*vpp)->v_type = VDIR;
		break;
	case pfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case pfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case pfstype_none:
		KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
	}
	/*
	 * Propagate flag through to vnode so users know it can change
	 * if the process changes (i.e. execve)
	 */
	if ((pn->pn_flags & PFS_PROCDEP) != 0)
		(*vpp)->v_vflag |= VV_PROCDEP;
	pvd->pvd_vnode = *vpp;
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	VN_LOCK_AREC(*vpp);
	error = insmntque(*vpp, mp);
	if (error != 0) {
		free(pvd, M_PFSVNCACHE);
		*vpp = NULLVP;
		return (error);
	}
retry2:
	mtx_lock(&pfs_vncache_mutex);
	/*
	 * Another thread may have raced with us and created the entry we
	 * were about to insert into the cache.  Recheck after
	 * pfs_vncache_mutex is reacquired.
	 */
	SLIST_FOREACH(pvd2, hash, pvd_hash) {
		if (pvd2->pvd_pn == pn && pvd2->pvd_pid == pid &&
		    pvd2->pvd_vnode->v_mount == mp) {
			vp = pvd2->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				vgone(*vpp);
				vput(*vpp);
				*vpp = vp;
				cache_purge(vp);
				return (0);
			}
			goto retry2;
		}
	}
	++pfs_vncache_misses;
	if (++pfs_vncache_entries > pfs_vncache_maxentries)
		pfs_vncache_maxentries = pfs_vncache_entries;
	SLIST_INSERT_HEAD(hash, pvd, pvd_hash);
	mtx_unlock(&pfs_vncache_mutex);
	return (0);
}
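
/*
 * Usage sketch (hypothetical caller, for illustration only): a pseudofs
 * consumer that needs the vnode backing a given node and process would
 * do something like
 *
 *	error = pfs_vncache_alloc(mp, &vn, pn, pid);
 *	if (error != 0)
 *		return (error);
 *	...use vn, which comes back locked and referenced...
 *	vput(vn);
 *
 * The same call serves both cache hits and first-time allocation; the
 * caller does not need to know which case it got.
 */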

/*
 * Free a vnode
 */
int
pfs_vncache_free(struct vnode *vp)
{
	struct pfs_vdata *pvd, *pvd2;

	mtx_lock(&pfs_vncache_mutex);
	pvd = (struct pfs_vdata *)vp->v_data;
	KASSERT(pvd != NULL, ("pfs_vncache_free(): no vnode data\n"));
	SLIST_FOREACH(pvd2, PFS_VNCACHE_HASH(pvd->pvd_pid), pvd_hash) {
		if (pvd2 != pvd)
			continue;
		SLIST_REMOVE(PFS_VNCACHE_HASH(pvd->pvd_pid), pvd, pfs_vdata,
		    pvd_hash);
		--pfs_vncache_entries;
		break;
	}
	mtx_unlock(&pfs_vncache_mutex);

	free(pvd, M_PFSVNCACHE);
	vp->v_data = NULL;
	return (0);
}

/*
 * Purge the cache of dead entries
 *
 * The code is not very efficient and this can perhaps be addressed without
 * a complete rewrite.  The previous iteration walked a single linked list
 * from scratch every time.  This code only walks the relevant hash chain
 * (if a pid is provided), but still resorts to scanning the entire cache
 * at least twice when a specific node is to be removed, which is slower.
 * This could be improved further by resizing the hash table.
 *
 * Explanation of the previous state:
 *
 * This is extremely inefficient due to the fact that vgone() not only
 * indirectly modifies the vnode cache, but may also sleep.  We can
 * neither hold pfs_vncache_mutex across a vgone() call, nor make any
 * assumptions about the state of the cache after vgone() returns.  In
 * consequence, we must start over after every vgone() call, and keep
 * trying until we manage to traverse the entire cache.
 *
 * The only way to improve this situation is to change the data structure
 * used to implement the cache.
 */
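
/*
 * Disconnect a single vnode.  The caller is expected to hold a hold
 * reference (vhold()) on the vnode and to have dropped pfs_vncache_mutex,
 * since vgone() may sleep and calls back into pfs_vncache_free() via the
 * vnode reclaim path, which takes the mutex itself.
 */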
static void
pfs_purge_one(struct vnode *vnp)
{

	VOP_LOCK(vnp, LK_EXCLUSIVE);
	vgone(vnp);
	VOP_UNLOCK(vnp, 0);
	vdrop(vnp);
}

void
pfs_purge(struct pfs_node *pn)
{
	struct pfs_vdata *pvd;
	struct vnode *vnp;
	u_long i, removed;

	mtx_lock(&pfs_vncache_mutex);
restart:
	removed = 0;
	/* Bucket indices run from 0 to pfs_vncache_hash (the mask), inclusive. */
	for (i = 0; i <= pfs_vncache_hash; i++) {
restart_chain:
		SLIST_FOREACH(pvd, &pfs_vncache_hashtbl[i], pvd_hash) {
			if (pn != NULL && pvd->pvd_pn != pn)
				continue;
			vnp = pvd->pvd_vnode;
			vhold(vnp);
			mtx_unlock(&pfs_vncache_mutex);
			pfs_purge_one(vnp);
			removed++;
			mtx_lock(&pfs_vncache_mutex);
			goto restart_chain;
		}
	}
	if (removed > 0)
		goto restart;
	mtx_unlock(&pfs_vncache_mutex);
}

static void
pfs_purge_all(void)
{

	pfs_purge(NULL);
}

/*
 * Free all vnodes associated with a defunct process
 */
static void
pfs_exit(void *arg, struct proc *p)
{
	struct pfs_vncache_head *hash;
	struct pfs_vdata *pvd;
	struct vnode *vnp;
	int pid;

	pid = p->p_pid;
	hash = PFS_VNCACHE_HASH(pid);
	if (SLIST_EMPTY(hash))
		return;
restart:
	mtx_lock(&pfs_vncache_mutex);
	SLIST_FOREACH(pvd, hash, pvd_hash) {
		if (pvd->pvd_pid != pid)
			continue;
		vnp = pvd->pvd_vnode;
		vhold(vnp);
		mtx_unlock(&pfs_vncache_mutex);
		pfs_purge_one(vnp);
		goto restart;
	}
	mtx_unlock(&pfs_vncache_mutex);
}