/*-
 * Copyright (c) 2001 Dag-Erling Coïdan Smørgrav
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_pseudofs.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/pseudofs/pseudofs.h>
#include <fs/pseudofs/pseudofs_internal.h>

static MALLOC_DEFINE(M_PFSVNCACHE, "pfs_vncache", "pseudofs vnode cache");

static struct mtx pfs_vncache_mutex;
static struct pfs_vdata *pfs_vncache;
static eventhandler_tag pfs_exit_tag;
static void pfs_exit(void *arg, struct proc *p);

SYSCTL_NODE(_vfs_pfs, OID_AUTO, vncache, CTLFLAG_RW, 0,
    "pseudofs vnode cache");

static int pfs_vncache_entries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, entries, CTLFLAG_RD,
    &pfs_vncache_entries, 0,
    "number of entries in the vnode cache");

static int pfs_vncache_maxentries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, maxentries, CTLFLAG_RD,
    &pfs_vncache_maxentries, 0,
    "highest number of entries in the vnode cache");

static int pfs_vncache_hits;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, hits, CTLFLAG_RD,
    &pfs_vncache_hits, 0,
    "number of cache hits since initialization");

static int pfs_vncache_misses;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, misses, CTLFLAG_RD,
    &pfs_vncache_misses, 0,
    "number of cache misses since initialization");

extern struct vop_vector pfs_vnodeops;	/* XXX -> .h file */

/*
 * Initialize vnode cache
 */
void
pfs_vncache_load(void)
{

	mtx_assert(&Giant, MA_OWNED);
	mtx_init(&pfs_vncache_mutex, "pfs_vncache", NULL, MTX_DEF);
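	/*
	 * Register for process_exit events so that vnodes cached for a
	 * process can be marked dead and purged when it exits.
	 */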
	pfs_exit_tag = EVENTHANDLER_REGISTER(process_exit, pfs_exit, NULL,
	    EVENTHANDLER_PRI_ANY);
}

/*
 * Tear down vnode cache
 */
void
pfs_vncache_unload(void)
{

	mtx_assert(&Giant, MA_OWNED);
	EVENTHANDLER_DEREGISTER(process_exit, pfs_exit_tag);
	KASSERT(pfs_vncache_entries == 0,
	    ("%d vncache entries remaining", pfs_vncache_entries));
	mtx_destroy(&pfs_vncache_mutex);
}

/*
 * Allocate a vnode
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
		  struct pfs_node *pn, pid_t pid)
{
	struct pfs_vdata *pvd;
	struct vnode *vp;
	int error;

	/*
	 * See if the vnode is in the cache.
	 * XXX linear search is not very efficient.
	 */
retry:
	mtx_lock(&pfs_vncache_mutex);
	for (pvd = pfs_vncache; pvd; pvd = pvd->pvd_next) {
		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
		    pvd->pvd_vnode->v_mount == mp) {
			vp = pvd->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				*vpp = vp;
				/*
				 * Some callers cache_enter(vp) later, so
				 * we have to make sure it's not in the
				 * VFS cache so it doesn't get entered
				 * twice.  A better solution would be to
				 * make pfs_vncache_alloc() responsible
				 * for entering the vnode in the VFS
				 * cache.
				 */
				cache_purge(vp);
				return (0);
			}
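			/*
			 * vget() failed, most likely because the vnode
			 * was reclaimed while we slept.  The list may
			 * have changed, so rescan it from the top.
			 */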
			goto retry;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);
	++pfs_vncache_misses;

	/* nope, get a new one */
	MALLOC(pvd, struct pfs_vdata *, sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
	mtx_lock(&pfs_vncache_mutex);
	if (++pfs_vncache_entries > pfs_vncache_maxentries)
		pfs_vncache_maxentries = pfs_vncache_entries;
	mtx_unlock(&pfs_vncache_mutex);
	error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp);
	if (error) {
		mtx_lock(&pfs_vncache_mutex);
		--pfs_vncache_entries;
		mtx_unlock(&pfs_vncache_mutex);
		FREE(pvd, M_PFSVNCACHE);
		return (error);
	}
	pvd->pvd_pn = pn;
	pvd->pvd_pid = pid;
	(*vpp)->v_data = pvd;
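	/* Derive the vnode type (and root flag) from the node type. */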
	switch (pn->pn_type) {
	case pfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
#if 0
		printf("root vnode allocated\n");
#endif
		/* fall through */
	case pfstype_dir:
	case pfstype_this:
	case pfstype_parent:
	case pfstype_procdir:
		(*vpp)->v_type = VDIR;
		break;
	case pfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case pfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case pfstype_none:
		KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
	}
	/*
	 * Propagate the flag to the vnode so users know its contents
	 * may change whenever the process does (e.g. across execve).
	 */
	if ((pn->pn_flags & PFS_PROCDEP) != 0)
		(*vpp)->v_vflag |= VV_PROCDEP;
	pvd->pvd_vnode = *vpp;
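	/*
	 * Allow the vnode lock to be acquired recursively, then lock
	 * the vnode before insmntque() hands it over to the mount
	 * point, which expects a locked vnode.
	 */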
	VN_LOCK_AREC(*vpp);
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	error = insmntque(*vpp, mp);
	if (error != 0) {
		mtx_lock(&pfs_vncache_mutex);
		--pfs_vncache_entries;
		mtx_unlock(&pfs_vncache_mutex);
		FREE(pvd, M_PFSVNCACHE);
		*vpp = NULLVP;
		return (error);
	}
	mtx_lock(&pfs_vncache_mutex);
	pvd->pvd_prev = NULL;
	pvd->pvd_next = pfs_vncache;
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd;
	pfs_vncache = pvd;
	mtx_unlock(&pfs_vncache_mutex);
	return (0);
}

/*
 * Free a vnode
 */
int
pfs_vncache_free(struct vnode *vp)
{
	struct pfs_vdata *pvd;

	mtx_lock(&pfs_vncache_mutex);
	pvd = (struct pfs_vdata *)vp->v_data;
	KASSERT(pvd != NULL, ("pfs_vncache_free(): no vnode data\n"));
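	/* Unlink the entry from the doubly-linked cache list. */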
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd->pvd_prev;
	if (pvd->pvd_prev)
		pvd->pvd_prev->pvd_next = pvd->pvd_next;
	else
		pfs_vncache = pvd->pvd_next;
	--pfs_vncache_entries;
	mtx_unlock(&pfs_vncache_mutex);

	FREE(pvd, M_PFSVNCACHE);
	vp->v_data = NULL;
	return (0);
}

/*
 * Purge the cache of dead entries
 *
 * This is extremely inefficient because vgone() not only indirectly
 * modifies the vnode cache, but may also sleep.  We can neither hold
 * pfs_vncache_mutex across a vgone() call, nor make any assumptions
 * about the state of the cache after vgone() returns.  Consequently,
 * we must start over after every vgone() call, and keep trying until
 * we manage to traverse the entire cache.
 *
 * The only way to improve this situation is to change the data structure
 * used to implement the cache.
 */
void
pfs_purge(struct pfs_node *pn)
{
	struct pfs_vdata *pvd;
	struct vnode *vnp;

	mtx_lock(&pfs_vncache_mutex);
	pvd = pfs_vncache;
	while (pvd != NULL) {
		if (pvd->pvd_dead || (pn != NULL && pvd->pvd_pn == pn)) {
			vnp = pvd->pvd_vnode;
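			/*
			 * Hold the vnode so it cannot be recycled out
			 * from under us while we drop the cache mutex
			 * to lock and vgone() it.
			 */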
			vhold(vnp);
			mtx_unlock(&pfs_vncache_mutex);
			VOP_LOCK(vnp, LK_EXCLUSIVE);
			vgone(vnp);
			VOP_UNLOCK(vnp, 0);
			vdrop(vnp);
			mtx_lock(&pfs_vncache_mutex);
			pvd = pfs_vncache;
		} else {
			pvd = pvd->pvd_next;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);
}

/*
 * Free all vnodes associated with a defunct process
 *
 * XXXRW: It is unfortunate that pfs_exit() always acquires and releases two
 * mutexes (one of which is Giant) for every process exit, even if procfs
 * isn't mounted.
 */
static void
pfs_exit(void *arg, struct proc *p)
{
	struct pfs_vdata *pvd;
	int dead;

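	/*
	 * Unlocked check: if the cache is empty there is nothing to
	 * do, and taking Giant and the cache mutex on every process
	 * exit would be wasted effort.  The read is racy, but at worst
	 * a concurrently added entry lingers until a later purge.
	 */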
	if (pfs_vncache == NULL)
		return;
	mtx_lock(&Giant);
	mtx_lock(&pfs_vncache_mutex);
	for (pvd = pfs_vncache, dead = 0; pvd != NULL; pvd = pvd->pvd_next)
		if (pvd->pvd_pid == p->p_pid)
			dead = pvd->pvd_dead = 1;
	mtx_unlock(&pfs_vncache_mutex);
	if (dead)
		pfs_purge(NULL);
	mtx_unlock(&Giant);
}
309