/*-
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>

#include <vm/uma.h>

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct	namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct	vnode *nc_dvp;		/* vnode of parent of name */
	struct	vnode *nc_vp;		/* vnode the name refers to */
	u_char	nc_flag;		/* flag bits */
	u_char	nc_nlen;		/* length of name */
	char	nc_name[0];		/* segment name */
};
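
/*
 * nc_name is variable-length storage: each entry is allocated from one
 * of the UMA zones below with room for the name appended directly to
 * the structure, so a whole entry lives in a single allocation.
 */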

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by a hash value
 * obtained from (dvp, name), where dvp refers to the directory
 * containing the name.
 *
 * If it is a "negative" entry (i.e., for a name that is known NOT to
 * exist), the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */
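
/*
 * For illustration, a lookup of the name "etc" under directory vnode
 * dvp derives its hash key exactly as cache_lookup() and cache_enter()
 * below do:
 *
 *	hash = fnv_32_buf("etc", 3, FNV1_32_INIT);
 *	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
 *	ncpp = NCHHASH(hash);	(the chain to search or insert on)
 */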

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash) \
	(&nchashtbl[(hash) & nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;	/* Negative entry LRU queue */
static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
static u_long	numneg;			/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
static u_long	numcachehv;		/* number of cache entries with vnodes held */
SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, "");
#if 0
static u_long	numcachepl;		/* number of cache purges for leaf entries */
SYSCTL_ULONG(_debug, OID_AUTO, numcachepl, CTLFLAG_RD, &numcachepl, 0, "");
#endif
struct	nchstats nchstats;		/* cache effectiveness statistics */

static struct mtx cache_lock;
MTX_SYSINIT(vfscache, &cache_lock, "Name Cache", MTX_DEF);

#define	CACHE_LOCK()	mtx_lock(&cache_lock)
#define	CACHE_UNLOCK()	mtx_unlock(&cache_lock)

/*
 * UMA zones for the VFS cache.
 *
 * The small cache is used for entries with short names, which are the
 * most common.  The large cache is used for entries which are too big to
 * fit in the small cache.
 */
static uma_zone_t cache_zone_small;
static uma_zone_t cache_zone_large;

#define	CACHE_PATH_CUTOFF	32
#define	CACHE_ZONE_SMALL	(sizeof(struct namecache) + CACHE_PATH_CUTOFF)
#define	CACHE_ZONE_LARGE	(sizeof(struct namecache) + NAME_MAX)

#define cache_alloc(len)	uma_zalloc(((len) <= CACHE_PATH_CUTOFF) ? \
	cache_zone_small : cache_zone_large, M_WAITOK)
#define cache_free(ncp)		do { \
	if ((ncp) != NULL) \
		uma_zfree(((ncp)->nc_nlen <= CACHE_PATH_CUTOFF) ? \
		    cache_zone_small : cache_zone_large, (ncp)); \
} while (0)
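
/*
 * For example, a 3-byte name such as "etc" is carved from
 * cache_zone_small (sizeof(struct namecache) + CACHE_PATH_CUTOFF bytes
 * per item), while a 40-byte name exceeds CACHE_PATH_CUTOFF and falls
 * through to cache_zone_large, which holds names up to NAME_MAX bytes.
 */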

static int	doingcache = 1;		/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");

/* Export size information to userland */
SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

/*
 * The new name cache statistics
 */
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD, &nchstats,
	sizeof(nchstats), "LU", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);
static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen);

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	1

/*
 * Grab an atomic snapshot of the name cache hash chain lengths.
 */
SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, "hash table stats");

static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count;

	n_nchash = nchash + 1;	/* nchash is max index, not count */
	if (!req->oldptr)
		return (SYSCTL_OUT(req, 0, n_nchash * sizeof(int)));

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		error = SYSCTL_OUT(req, &count, sizeof(count));
		if (error)
			return (error);
	}
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_debug_hashstat_rawnchash, "S,int", "nchash chain lengths");

static int
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count, maxlength, used, pct;

	if (!req->oldptr)
		return (SYSCTL_OUT(req, 0, 4 * sizeof(int)));

	n_nchash = nchash + 1;	/* nchash is max index, not count */
	used = 0;
	maxlength = 0;

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		if (count)
			used++;
		if (maxlength < count)
			maxlength = count;
	}
	n_nchash = nchash + 1;
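	/* pct is percent utilization scaled by 100; 2500 means 25.00%. */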
	pct = (used * 100 * 100) / n_nchash;
	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &used, sizeof(used));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &pct, sizeof(pct));
	if (error)
		return (error);
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_debug_hashstat_nchash, "I", "nchash chain lengths");

/*
 * cache_zap():
 *
 *   Removes a namecache entry from the cache, whether it holds an
 *   actual vnode pointer or is just a negative cache entry.  The
 *   cache lock must be held by the caller.
 */
static void
cache_zap(ncp)
	struct namecache *ncp;
{
	struct vnode *vp;

	mtx_assert(&cache_lock, MA_OWNED);
	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp);
	vp = NULL;
	LIST_REMOVE(ncp, nc_hash);
	LIST_REMOVE(ncp, nc_src);
	if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
		vp = ncp->nc_dvp;
		numcachehv--;
	}
	if (ncp->nc_vp) {
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
		ncp->nc_vp->v_dd = NULL;
	} else {
		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
		numneg--;
	}
	numcache--;
	cache_free(ncp);
	if (vp)
		vdrop(vp);
}

/*
 * cache_leaf_test()
 *
 *      Test whether this (directory) vnode's namei cache entry contains
 *      subdirectories or not.  Used to determine whether the directory is
 *      a leaf in the namei cache or not.  Note: the directory may still
 *      contain files in the namei cache.
 *
 *      Returns 0 if the directory is a leaf, -1 if it isn't.
 */
int
cache_leaf_test(struct vnode *vp)
{
	struct namecache *ncpc;
	int leaf;

	leaf = 0;
	CACHE_LOCK();
	LIST_FOREACH(ncpc, &vp->v_cache_src, nc_src) {
		if (ncpc->nc_vp != NULL && ncpc->nc_vp->v_type == VDIR) {
			leaf = -1;
			break;
		}
	}
	CACHE_UNLOCK();
	return (leaf);
}

/*
 * Lookup an entry in the cache
 *
 * Lookup is called with dvp pointing to the directory to search,
 * and cnp pointing to the name of the entry being sought.  If the
 * lookup succeeds, the vnode is returned in *vpp, and a status of -1
 * is returned.  If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned.  If the lookup
 * fails, a status of zero is returned.
 *
 * *vpp is locked and ref'd on return.  If we're looking up DOTDOT, dvp is
 * unlocked.  If we're looking up ".", an extra ref is taken, but the lock is
 * not recursively acquired.
 */

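/*
 * A caller typically dispatches on the return value as follows
 * (vfs_cache_lookup() below does exactly this):
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	if (error == 0)			(miss: ask the filesystem)
 *		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
 *	if (error == ENOENT)		(negative hit: name is absent)
 *		return (error);
 *	return (0);			(positive hit: *vpp is valid)
 */
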
int
cache_lookup(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	u_int32_t hash;
	int error;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}
retry:
	CACHE_LOCK();
	numcalls++;

	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			*vpp = dvp;
			CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
			    dvp, cnp->cn_nameptr);
			dothits++;
			goto success;
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			dotdothits++;
			if (dvp->v_dd == NULL ||
			    (cnp->cn_flags & MAKEENTRY) == 0) {
				CACHE_UNLOCK();
				return (0);
			}
			*vpp = dvp->v_dd;
			CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
			    dvp, cnp->cn_nameptr, *vpp);
			goto success;
		}
	}

	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == NULL) {
		if ((cnp->cn_flags & MAKEENTRY) == 0) {
			nummisszap++;
		} else {
			nummiss++;
		}
		nchstats.ncs_miss++;
		CACHE_UNLOCK();
		return (0);
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		numposzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		CACHE_UNLOCK();
		return (0);
	}

	/* We found a "positive" match, return the vnode */
	if (ncp->nc_vp) {
		numposhits++;
		nchstats.ncs_goodhits++;
		*vpp = ncp->nc_vp;
		CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
		    dvp, cnp->cn_nameptr, *vpp, ncp);
		goto success;
	}

	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		numnegzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		CACHE_UNLOCK();
		return (0);
	}

	numneghits++;
	/*
	 * We found a "negative" match, so we shift it to the end of
	 * the "negative" cache entries queue to satisfy LRU.  Also,
	 * check to see if the entry is a whiteout; indicate this to
	 * the componentname, if so.
	 */
	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	nchstats.ncs_neghits++;
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	CACHE_UNLOCK();
	return (ENOENT);

success:
	/*
	 * On success we return a locked and ref'd vnode as per the lookup
	 * protocol.
	 */
	if (dvp == *vpp) {   /* lookup on "." */
		VREF(*vpp);
		CACHE_UNLOCK();
		return (-1);
	}
	if (cnp->cn_flags & ISDOTDOT)
		VOP_UNLOCK(dvp, 0, cnp->cn_thread);
	VI_LOCK(*vpp);
	CACHE_UNLOCK();
	error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
	if (cnp->cn_flags & ISDOTDOT)
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_thread);
	if (error) {
		*vpp = NULL;
		goto retry;
	}
	return (-1);
}

/*
 * Add an entry to the cache.
 */
void
cache_enter(dvp, vp, cnp)
	struct vnode *dvp;
	struct vnode *vp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	u_int32_t hash;
	int hold;
	int zap;
	int len;

	CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
	VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
	    ("cache_enter: Adding a doomed vnode"));

	if (!doingcache)
		return;

	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			return;
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			dvp->v_dd = vp;
			return;
		}
	}

	hold = 0;
	zap = 0;
	ncp = cache_alloc(cnp->cn_namelen);
	CACHE_LOCK();
	numcache++;
	if (!vp) {
		numneg++;
		ncp->nc_flag = (cnp->cn_flags & ISWHITEOUT) ? NCF_WHITE : 0;
	} else if (vp->v_type == VDIR) {
		vp->v_dd = dvp;
	} else {
		vp->v_dd = NULL;
	}

	/*
	 * Set the rest of the namecache entry elements, calculate its
	 * hash key and insert it into the appropriate chain within
	 * the cache entries table.
	 */
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	len = ncp->nc_nlen = cnp->cn_namelen;
	hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
	bcopy(cnp->cn_nameptr, ncp->nc_name, len);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	ncpp = NCHHASH(hash);
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
	if (LIST_EMPTY(&dvp->v_cache_src)) {
		hold = 1;
		numcachehv++;
	}
	LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	/*
	 * If the entry is "negative", we place it into the
	 * "negative" cache queue, otherwise, we place it into the
	 * destination vnode's cache entries queue.
	 */
	if (vp) {
		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
	} else {
		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	}
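	/*
	 * Keep negative entries below 1/ncnegfactor (by default 1/16th)
	 * of the whole cache; when they grow past that, reclaim the
	 * coldest one from the head of the LRU queue.
	 */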
	if (numneg * ncnegfactor > numcache) {
		ncp = TAILQ_FIRST(&ncneg);
		zap = 1;
	}
	if (hold)
		vhold(dvp);
	if (zap)
		cache_zap(ncp);
	CACHE_UNLOCK();
}

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
static void
nchinit(void *dummy __unused)
{

	TAILQ_INIT(&ncneg);

	cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL, NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE, NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);

/*
 * Invalidate all entries to a particular vnode.
 */
void
cache_purge(vp)
	struct vnode *vp;
{

	CTR1(KTR_VFS, "cache_purge(%p)", vp);
	CACHE_LOCK();
	while (!LIST_EMPTY(&vp->v_cache_src))
		cache_zap(LIST_FIRST(&vp->v_cache_src));
	while (!TAILQ_EMPTY(&vp->v_cache_dst))
		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
	vp->v_dd = NULL;
	CACHE_UNLOCK();
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(mp)
	struct mount *mp;
{
	struct nchashhead *ncpp;
	struct namecache *ncp, *nnp;
	struct nchashhead mplist;

	LIST_INIT(&mplist);
	ncp = NULL;

	/* Scan hash tables for applicable entries */
	CACHE_LOCK();
	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
		for (ncp = LIST_FIRST(ncpp); ncp != NULL; ncp = nnp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (ncp->nc_dvp->v_mount == mp) {
				LIST_REMOVE(ncp, nc_hash);
				LIST_INSERT_HEAD(&mplist, ncp, nc_hash);
			}
		}
	}
	while (!LIST_EMPTY(&mplist))
		cache_zap(LIST_FIRST(&mplist));
	CACHE_UNLOCK();
}

/*
 * Perform canonical checks and cache lookup, and pass on to the
 * filesystem through vop_cachedlookup only if needed.
 */

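/*
 * Filesystems typically install this function as their vop_lookup
 * entry point and provide the uncached path in vop_cachedlookup, so
 * the name cache is consulted before filesystem-specific code runs.
 */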
int
vfs_cache_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *dvp;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct thread *td = cnp->cn_thread;

	*vpp = NULL;
	dvp = ap->a_dvp;

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	error = VOP_ACCESS(dvp, VEXEC, cred, td);
	if (error)
		return (error);

	error = cache_lookup(dvp, vpp, cnp);
	if (error == 0)
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
	if (error == ENOENT)
		return (error);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct  __getcwd_args {
	u_char	*buf;
	u_int	buflen;
};
#endif

/*
 * XXX All of these sysctls would probably be more productive dead.
 */
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
   "Disable the getcwd syscall");

/* Implementation of the getcwd syscall */
int
__getcwd(td, uap)
	struct thread *td;
	struct __getcwd_args *uap;
{

	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen));
}

int
kern___getcwd(struct thread *td, u_char *buf, enum uio_seg bufseg, u_int buflen)
{
	char *bp, *tmpbuf;
	struct filedesc *fdp;
	int error;

	if (disablecwd)
		return (ENODEV);
	if (buflen < 2)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	mtx_lock(&Giant);
	FILEDESC_LOCK(fdp);
	error = vn_fullpath1(td, fdp->fd_cdir, fdp->fd_rdir, tmpbuf,
	    &bp, buflen);
	FILEDESC_UNLOCK(fdp);
	mtx_unlock(&Giant);

	if (!error) {
		if (bufseg == UIO_SYSSPACE)
			bcopy(bp, buf, strlen(bp) + 1);
		else
			error = copyout(bp, buf, strlen(bp) + 1);
	}
	free(tmpbuf, M_TEMP);
	return (error);
}

/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
	"Disable the vn_fullpath function");

/* These count for kern___getcwd(), too. */
STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

/*
 * Retrieve the full filesystem path that corresponds to a vnode from the
 * name cache (if available).
 */
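/*
 * A sketch of typical use; on success the caller owns the buffer and
 * must release it through the *freebuf pointer:
 *
 *	error = vn_fullpath(td, vp, &retpath, &freepath);
 *	if (error == 0) {
 *		...use retpath...
 *		free(freepath, M_TEMP);
 *	}
 */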
int
vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
{
	char *buf;
	struct filedesc *fdp;
	int error;

	if (disablefullpath)
		return (ENODEV);
	if (vn == NULL)
		return (EINVAL);

	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	mtx_lock(&Giant);
	FILEDESC_LOCK(fdp);
	error = vn_fullpath1(td, vn, fdp->fd_rdir, buf, retbuf, MAXPATHLEN);
	FILEDESC_UNLOCK(fdp);
	mtx_unlock(&Giant);

	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}

/*
 * The magic behind kern___getcwd() and vn_fullpath().
 */
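/*
 * The path is assembled backwards: bp starts at the end of the buffer
 * and each component found in the cache is prepended to it, walking
 * from vp towards rdir via the nc_dvp links and hopping across mount
 * points through mnt_vnodecovered.
 */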
static int
vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen)
{
	char *bp;
	int error, i, slash_prefixed;
	struct namecache *ncp;

	mtx_assert(&Giant, MA_OWNED);

	bp = buf + buflen - 1;
	*bp = '\0';
	error = 0;
	slash_prefixed = 0;

	CACHE_LOCK();
	numfullpathcalls++;
	if (vp->v_type != VDIR) {
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!ncp) {
			numfullpathfail2++;
			CACHE_UNLOCK();
			return (ENOENT);
		}
		for (i = ncp->nc_nlen - 1; i >= 0 && bp > buf; i--)
			*--bp = ncp->nc_name[i];
		if (bp == buf) {
			numfullpathfail4++;
			CACHE_UNLOCK();
			return (ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		vp = ncp->nc_dvp;
	}
	while (vp != rdir && vp != rootvnode) {
		if (vp->v_vflag & VV_ROOT) {
			if (vp->v_mount == NULL) {	/* forced unmount */
				error = EBADF;
				break;
			}
			vp = vp->v_mount->mnt_vnodecovered;
			continue;
		}
		if (vp->v_dd == NULL) {
			numfullpathfail1++;
			error = ENOTDIR;
			break;
		}
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!ncp) {
			numfullpathfail2++;
			error = ENOENT;
			break;
		}
		MPASS(ncp->nc_dvp == vp->v_dd);
		for (i = ncp->nc_nlen - 1; i >= 0 && bp != buf; i--)
			*--bp = ncp->nc_name[i];
		if (bp == buf) {
			numfullpathfail4++;
			error = ENOMEM;
			break;
		}
		*--bp = '/';
		slash_prefixed = 1;
		vp = ncp->nc_dvp;
	}
	if (error) {
		CACHE_UNLOCK();
		return (error);
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			CACHE_UNLOCK();
			return (ENOMEM);
		} else {
			*--bp = '/';
		}
	}
	numfullpathfound++;
	CACHE_UNLOCK();

	*retbuf = bp;
	return (0);
}
866