1 /*-
2  * Copyright (c) 1989, 1993, 1995
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Poul-Henning Kamp of the FreeBSD Project.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/sysctl.h>
44 #include <sys/mount.h>
45 #include <sys/vnode.h>
46 #include <sys/namei.h>
47 #include <sys/malloc.h>
48 #include <sys/syscallsubr.h>
49 #include <sys/sysproto.h>
50 #include <sys/proc.h>
51 #include <sys/filedesc.h>
52 #include <sys/fnv_hash.h>
53 
54 #include <vm/uma.h>
55 
56 /*
57  * This structure describes the elements in the cache of recent
58  * names looked up by namei.
59  */
60 
61 struct	namecache {
62 	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
63 	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
64 	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
65 	struct	vnode *nc_dvp;		/* vnode of parent of name */
66 	struct	vnode *nc_vp;		/* vnode the name refers to */
67 	u_char	nc_flag;		/* flag bits */
68 	u_char	nc_nlen;		/* length of name */
69 	char	nc_name[0];		/* segment name */
70 };
71 
72 /*
73  * Name caching works as follows:
74  *
75  * Names found by directory scans are retained in a cache
76  * for future reference.  It is managed LRU, so frequently
77  * used names will hang around.  The cache is indexed by a hash value
78  * obtained from (vp, name), where vp refers to the directory
79  * containing the name.
80  *
81  * If it is a "negative" entry, (i.e. for a name that is known NOT to
82  * exist) the vnode pointer will be NULL.
83  *
84  * Upon reaching the last segment of a path, if the reference
85  * is for DELETE, or NOCACHE is set (rewrite), and the
86  * name is located in the cache, it will be dropped.
87  */
88 
89 /*
90  * Structures associated with name caching.
91  */
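/*
 * The hash chain for an entry is picked by NCHHASH() from an FNV-1 hash
 * of the component name mixed with the address of the parent (directory)
 * vnode; see cache_lookup() and cache_enter() below.
 */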
92 #define NCHHASH(hash) \
93 	(&nchashtbl[(hash) & nchash])
94 static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
95 static TAILQ_HEAD(, namecache) ncneg;	/* Negative entry LRU list */
96 static u_long	nchash;			/* size of hash table - 1 */
97 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
98 static u_long	ncnegfactor = 16;	/* ratio of negative entries */
99 SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
100 static u_long	numneg;			/* number of negative entries allocated */
101 SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
102 static u_long	numcache;		/* number of cache entries allocated */
103 SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
104 static u_long	numcachehv;		/* number of cache entries with vnodes held */
105 SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, "");
106 #if 0
107 static u_long	numcachepl;		/* number of cache purges for leaf entries */
108 SYSCTL_ULONG(_debug, OID_AUTO, numcachepl, CTLFLAG_RD, &numcachepl, 0, "");
109 #endif
110 struct	nchstats nchstats;		/* cache effectiveness statistics */
111 
112 static struct mtx cache_lock;
113 MTX_SYSINIT(vfscache, &cache_lock, "Name Cache", MTX_DEF);
114 
115 #define	CACHE_LOCK()	mtx_lock(&cache_lock)
116 #define	CACHE_UNLOCK()	mtx_unlock(&cache_lock)
117 
118 /*
119  * UMA zones for the VFS cache.
120  *
121  * The small cache is used for entries with short names, which are the
122  * most common.  The large cache is used for entries which are too big to
123  * fit in the small cache.
124  */
125 static uma_zone_t cache_zone_small;
126 static uma_zone_t cache_zone_large;
127 
128 #define	CACHE_PATH_CUTOFF	32
129 #define	CACHE_ZONE_SMALL	(sizeof(struct namecache) + CACHE_PATH_CUTOFF)
130 #define	CACHE_ZONE_LARGE	(sizeof(struct namecache) + NAME_MAX)
131 
132 #define cache_alloc(len)	uma_zalloc(((len) <= CACHE_PATH_CUTOFF) ? \
133 	cache_zone_small : cache_zone_large, M_WAITOK)
134 #define cache_free(ncp)		do { \
135 	if ((ncp) != NULL) \
136 		uma_zfree(((ncp)->nc_nlen <= CACHE_PATH_CUTOFF) ? \
137 		    cache_zone_small : cache_zone_large, (ncp)); \
138 } while (0)
139 
140 static int	doingcache = 1;		/* 1 => enable the cache */
141 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");
142 
143 /* Export size information to userland */
144 SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, 0,
145 	sizeof(struct namecache), "");
146 
147 /*
148  * The new name cache statistics
149  */
150 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
151 #define STATNODE(mode, name, var) \
152 	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
153 STATNODE(CTLFLAG_RD, numneg, &numneg);
154 STATNODE(CTLFLAG_RD, numcache, &numcache);
155 static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
156 static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
157 static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
158 static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
159 static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
160 static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
161 static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
162 static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
163 static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
164 static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);
165 
166 SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD, &nchstats,
167 	sizeof(nchstats), "LU", "VFS cache effectiveness statistics");
168 
169 
170 
171 static void cache_zap(struct namecache *ncp);
172 static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
173     char *buf, char **retbuf, u_int buflen);
174 
175 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
176 
177 /*
178  * Flags in namecache.nc_flag
179  */
180 #define NCF_WHITE	1
181 
182 /*
183  * Grab an atomic snapshot of the name cache hash chain lengths
184  */
185 SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, "hash table stats");
186 
187 static int
188 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
189 {
190 	int error;
191 	struct nchashhead *ncpp;
192 	struct namecache *ncp;
193 	int n_nchash;
194 	int count;
195 
196 	n_nchash = nchash + 1;	/* nchash is max index, not count */
197 	if (!req->oldptr)
198 		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
199 
200 	/* Scan hash tables for applicable entries */
201 	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
202 		CACHE_LOCK();
203 		count = 0;
204 		LIST_FOREACH(ncp, ncpp, nc_hash) {
205 			count++;
206 		}
207 		CACHE_UNLOCK();
208 		error = SYSCTL_OUT(req, &count, sizeof(count));
209 		if (error)
210 			return (error);
211 	}
212 	return (0);
213 }
214 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD,
215 	0, 0, sysctl_debug_hashstat_rawnchash, "S,int", "nchash chain lengths");
216 
217 static int
218 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
219 {
220 	int error;
221 	struct nchashhead *ncpp;
222 	struct namecache *ncp;
223 	int n_nchash;
224 	int count, maxlength, used, pct;
225 
226 	if (!req->oldptr)
227 		return SYSCTL_OUT(req, 0, 4 * sizeof(int));
228 
229 	n_nchash = nchash + 1;	/* nchash is max index, not count */
230 	used = 0;
231 	maxlength = 0;
232 
233 	/* Scan hash tables for applicable entries */
234 	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
235 		count = 0;
236 		CACHE_LOCK();
237 		LIST_FOREACH(ncp, ncpp, nc_hash) {
238 			count++;
239 		}
240 		CACHE_UNLOCK();
241 		if (count)
242 			used++;
243 		if (maxlength < count)
244 			maxlength = count;
245 	}
246 	n_nchash = nchash + 1;
247 	pct = (used * 100 * 100) / n_nchash;
248 	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
249 	if (error)
250 		return (error);
251 	error = SYSCTL_OUT(req, &used, sizeof(used));
252 	if (error)
253 		return (error);
254 	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
255 	if (error)
256 		return (error);
257 	error = SYSCTL_OUT(req, &pct, sizeof(pct));
258 	if (error)
259 		return (error);
260 	return (0);
261 }
262 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD,
263 	0, 0, sysctl_debug_hashstat_nchash, "I", "nchash chain lengths");
264 
265 /*
266  * cache_zap():
267  *
268  *   Removes a namecache entry from the cache, whether it holds an actual
269  *   vnode pointer or is just a negative cache entry.
270  */
271 static void
272 cache_zap(ncp)
273 	struct namecache *ncp;
274 {
275 	struct vnode *vp;
276 
277 	mtx_assert(&cache_lock, MA_OWNED);
278 	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp);
279 	vp = NULL;
280 	LIST_REMOVE(ncp, nc_hash);
281 	LIST_REMOVE(ncp, nc_src);
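	/*
	 * If this was the last entry on the directory's v_cache_src list,
	 * remember the vnode so the hold acquired in cache_enter() can be
	 * dropped once the entry has been freed.
	 */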
282 	if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
283 		vp = ncp->nc_dvp;
284 		numcachehv--;
285 	}
286 	if (ncp->nc_vp) {
287 		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
288 		ncp->nc_vp->v_dd = NULL;
289 	} else {
290 		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
291 		numneg--;
292 	}
293 	numcache--;
294 	cache_free(ncp);
295 	if (vp)
296 		vdrop(vp);
297 }
298 
299 /*
300  * Lookup an entry in the cache
301  *
302  * Lookup is called with dvp pointing to the directory to search,
303  * cnp pointing to the name of the entry being sought. If the lookup
304  * succeeds, the vnode is returned in *vpp, and a status of -1 is
305  * returned. If the lookup determines that the name does not exist
306  * (negative caching), a status of ENOENT is returned. If the lookup
307  * fails, a status of zero is returned.
308  *
309  * vpp is locked and ref'd on return.  If we're looking up DOTDOT, dvp is
310  * unlocked.  If we're looking up ".", an extra ref is taken, but the lock is
311  * not recursively acquired.
312  */
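/*
 * A typical consumer (see vfs_cache_lookup() below) handles the three
 * results roughly like this, with 0 falling through to the filesystem,
 * ENOENT reporting a cached negative entry and -1 reporting a positive
 * hit with *vpp locked and ref'd:
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	if (error == 0)
 *		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
 *	if (error == ENOENT)
 *		return (error);
 *	return (0);
 */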
313 
314 int
315 cache_lookup(dvp, vpp, cnp)
316 	struct vnode *dvp;
317 	struct vnode **vpp;
318 	struct componentname *cnp;
319 {
320 	struct namecache *ncp;
321 	u_int32_t hash;
322 	int error, ltype;
323 
324 	if (!doingcache) {
325 		cnp->cn_flags &= ~MAKEENTRY;
326 		return (0);
327 	}
328 retry:
329 	CACHE_LOCK();
330 	numcalls++;
331 
332 	if (cnp->cn_nameptr[0] == '.') {
333 		if (cnp->cn_namelen == 1) {
334 			*vpp = dvp;
335 			CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
336 			    dvp, cnp->cn_nameptr);
337 			dothits++;
338 			goto success;
339 		}
340 		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
341 			dotdothits++;
342 			if (dvp->v_dd == NULL ||
343 			    (cnp->cn_flags & MAKEENTRY) == 0) {
344 				CACHE_UNLOCK();
345 				return (0);
346 			}
347 			*vpp = dvp->v_dd;
348 			CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
349 			    dvp, cnp->cn_nameptr, *vpp);
350 			goto success;
351 		}
352 	}
353 
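	/*
	 * Hash the component name together with the parent vnode pointer
	 * to select the chain to search.
	 */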
354 	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
355 	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
356 	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
357 		numchecks++;
358 		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
359 		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
360 			break;
361 	}
362 
363 	/* We failed to find an entry */
364 	if (ncp == NULL) {
365 		if ((cnp->cn_flags & MAKEENTRY) == 0) {
366 			nummisszap++;
367 		} else {
368 			nummiss++;
369 		}
370 		nchstats.ncs_miss++;
371 		CACHE_UNLOCK();
372 		return (0);
373 	}
374 
375 	/* We don't want to have an entry, so dump it */
376 	if ((cnp->cn_flags & MAKEENTRY) == 0) {
377 		numposzaps++;
378 		nchstats.ncs_badhits++;
379 		cache_zap(ncp);
380 		CACHE_UNLOCK();
381 		return (0);
382 	}
383 
384 	/* We found a "positive" match, return the vnode */
385 	if (ncp->nc_vp) {
386 		numposhits++;
387 		nchstats.ncs_goodhits++;
388 		*vpp = ncp->nc_vp;
389 		CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
390 		    dvp, cnp->cn_nameptr, *vpp, ncp);
391 		goto success;
392 	}
393 
394 	/* We found a negative match, and want to create it, so purge */
395 	if (cnp->cn_nameiop == CREATE) {
396 		numnegzaps++;
397 		nchstats.ncs_badhits++;
398 		cache_zap(ncp);
399 		CACHE_UNLOCK();
400 		return (0);
401 	}
402 
403 	numneghits++;
404 	/*
405 	 * We found a "negative" match, so we shift it to the end of
406 	 * the "negative" cache entries queue to satisfy LRU.  Also,
407 	 * check to see if the entry is a whiteout; indicate this to
408 	 * the componentname, if so.
409 	 */
410 	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
411 	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
412 	nchstats.ncs_neghits++;
413 	if (ncp->nc_flag & NCF_WHITE)
414 		cnp->cn_flags |= ISWHITEOUT;
415 	CACHE_UNLOCK();
416 	return (ENOENT);
417 
418 success:
419 	/*
420 	 * On success we return a locked and ref'd vnode as per the lookup
421 	 * protocol.
422 	 */
423 	if (dvp == *vpp) {   /* lookup on "." */
424 		VREF(*vpp);
425 		CACHE_UNLOCK();
426 		/*
427 		 * When we look up "." we can still be asked to lock it
428 		 * differently...
429 		 */
430 		ltype = cnp->cn_lkflags & LK_TYPE_MASK;
431 		if (ltype == VOP_ISLOCKED(*vpp))
432 			return (-1);
433 		else if (ltype == LK_EXCLUSIVE)
434 			vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
435 		return (-1);
436 	}
437 	ltype = 0;	/* silence gcc warning */
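	/*
	 * For a ".." lookup the directory is unlocked before the found
	 * vnode is locked and is relocked with its original lock type
	 * once vget() returns.
	 */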
438 	if (cnp->cn_flags & ISDOTDOT) {
439 		ltype = VOP_ISLOCKED(dvp);
440 		VOP_UNLOCK(dvp, 0);
441 	}
442 	VI_LOCK(*vpp);
443 	CACHE_UNLOCK();
444 	error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
445 	if (cnp->cn_flags & ISDOTDOT)
446 		vn_lock(dvp, ltype | LK_RETRY);
447 	if (error) {
448 		*vpp = NULL;
449 		goto retry;
450 	}
451 	if ((cnp->cn_flags & ISLASTCN) &&
452 	    (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
453 		ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
454 	}
455 	return (-1);
456 }
457 
458 /*
459  * Add an entry to the cache.
460  */
461 void
462 cache_enter(dvp, vp, cnp)
463 	struct vnode *dvp;
464 	struct vnode *vp;
465 	struct componentname *cnp;
466 {
467 	struct namecache *ncp;
468 	struct nchashhead *ncpp;
469 	u_int32_t hash;
470 	int hold;
471 	int zap;
472 	int len;
473 
474 	CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
475 	VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
476 	    ("cache_enter: Adding a doomed vnode"));
477 
478 	if (!doingcache)
479 		return;
480 
481 	if (cnp->cn_nameptr[0] == '.') {
482 		if (cnp->cn_namelen == 1) {
483 			return;
484 		}
485 		/*
486 		 * For dotdot lookups only cache the v_dd pointer if the
487 		 * directory has a link back to its parent via v_cache_dst.
488 		 * Without this an unlinked directory would keep a soft
489 		 * reference to its parent which could not be NULLed at
490 		 * cache_purge() time.
491 		 */
492 		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
493 			CACHE_LOCK();
494 			if (!TAILQ_EMPTY(&dvp->v_cache_dst))
495 				dvp->v_dd = vp;
496 			CACHE_UNLOCK();
497 			return;
498 		}
499 	}
500 
501 	hold = 0;
502 	zap = 0;
503 	ncp = cache_alloc(cnp->cn_namelen);
504 	CACHE_LOCK();
505 	numcache++;
506 	if (!vp) {
507 		numneg++;
508 		ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
509 	} else if (vp->v_type == VDIR) {
510 		vp->v_dd = dvp;
511 	} else {
512 		vp->v_dd = NULL;
513 	}
514 
515 	/*
516 	 * Set the rest of the namecache entry elements, calculate its
517 	 * hash key and insert it into the appropriate chain within
518 	 * the cache entries table.
519 	 */
520 	ncp->nc_vp = vp;
521 	ncp->nc_dvp = dvp;
522 	len = ncp->nc_nlen = cnp->cn_namelen;
523 	hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
524 	bcopy(cnp->cn_nameptr, ncp->nc_name, len);
525 	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
526 	ncpp = NCHHASH(hash);
527 	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
528 	if (LIST_EMPTY(&dvp->v_cache_src)) {
529 		hold = 1;
530 		numcachehv++;
531 	}
532 	LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
533 	/*
534 	 * If the entry is "negative", we place it into the
535 	 * "negative" cache queue, otherwise, we place it into the
536 	 * destination vnode's cache entries queue.
537 	 */
538 	if (vp) {
539 		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
540 	} else {
541 		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
542 	}
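	/*
	 * If negative entries have grown past their allowed share of the
	 * cache (1 in ncnegfactor entries), reclaim the oldest one from
	 * the head of the ncneg LRU queue.
	 */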
543 	if (numneg * ncnegfactor > numcache) {
544 		ncp = TAILQ_FIRST(&ncneg);
545 		zap = 1;
546 	}
547 	if (hold)
548 		vhold(dvp);
549 	if (zap)
550 		cache_zap(ncp);
551 	CACHE_UNLOCK();
552 }
553 
554 /*
555  * Name cache initialization, from vfs_init() when we are booting
556  */
557 static void
558 nchinit(void *dummy __unused)
559 {
560 
561 	TAILQ_INIT(&ncneg);
562 
563 	cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL, NULL,
564 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
565 	cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE, NULL,
566 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
567 
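	/* The table is sized for twice the desired number of vnodes. */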
568 	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
569 }
570 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);
571 
572 
573 /*
574  * Invalidate all entries to a particular vnode.
575  */
576 void
577 cache_purge(vp)
578 	struct vnode *vp;
579 {
580 
581 	CTR1(KTR_VFS, "cache_purge(%p)", vp);
582 	CACHE_LOCK();
583 	while (!LIST_EMPTY(&vp->v_cache_src))
584 		cache_zap(LIST_FIRST(&vp->v_cache_src));
585 	while (!TAILQ_EMPTY(&vp->v_cache_dst))
586 		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
587 	vp->v_dd = NULL;
588 	CACHE_UNLOCK();
589 }
590 
591 /*
592  * Flush all entries referencing a particular filesystem.
593  */
594 void
595 cache_purgevfs(mp)
596 	struct mount *mp;
597 {
598 	struct nchashhead *ncpp;
599 	struct namecache *ncp, *nnp;
600 
601 	/* Scan hash tables for applicable entries */
602 	CACHE_LOCK();
603 	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
604 		LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) {
605 			if (ncp->nc_dvp->v_mount == mp)
606 				cache_zap(ncp);
607 		}
608 	}
609 	CACHE_UNLOCK();
610 }
611 
612 /*
613  * Perform canonical checks and cache lookup and pass on to the filesystem
614  * through VOP_CACHEDLOOKUP() only if needed.
615  */
616 
617 int
618 vfs_cache_lookup(ap)
619 	struct vop_lookup_args /* {
620 		struct vnode *a_dvp;
621 		struct vnode **a_vpp;
622 		struct componentname *a_cnp;
623 	} */ *ap;
624 {
625 	struct vnode *dvp;
626 	int error;
627 	struct vnode **vpp = ap->a_vpp;
628 	struct componentname *cnp = ap->a_cnp;
629 	struct ucred *cred = cnp->cn_cred;
630 	int flags = cnp->cn_flags;
631 	struct thread *td = cnp->cn_thread;
632 
633 	*vpp = NULL;
634 	dvp = ap->a_dvp;
635 
636 	if (dvp->v_type != VDIR)
637 		return (ENOTDIR);
638 
639 	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
640 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
641 		return (EROFS);
642 
643 	error = VOP_ACCESS(dvp, VEXEC, cred, td);
644 	if (error)
645 		return (error);
646 
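	/*
	 * -1 from cache_lookup() is a positive hit (reported as success),
	 * ENOENT is a cached negative entry and 0 is a miss that is passed
	 * on to the filesystem.
	 */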
647 	error = cache_lookup(dvp, vpp, cnp);
648 	if (error == 0)
649 		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
650 	if (error == ENOENT)
651 		return (error);
652 	return (0);
653 }
654 
655 
656 #ifndef _SYS_SYSPROTO_H_
657 struct  __getcwd_args {
658 	u_char	*buf;
659 	u_int	buflen;
660 };
661 #endif
662 
663 /*
664  * XXX All of these sysctls would probably be more productive dead.
665  */
666 static int disablecwd;
667 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
668    "Disable the getcwd syscall");
669 
670 /* Implementation of the getcwd syscall. */
671 int
672 __getcwd(td, uap)
673 	struct thread *td;
674 	struct __getcwd_args *uap;
675 {
676 
677 	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen));
678 }
679 
680 int
681 kern___getcwd(struct thread *td, u_char *buf, enum uio_seg bufseg, u_int buflen)
682 {
683 	char *bp, *tmpbuf;
684 	struct filedesc *fdp;
685 	int error;
686 
687 	if (disablecwd)
688 		return (ENODEV);
689 	if (buflen < 2)
690 		return (EINVAL);
691 	if (buflen > MAXPATHLEN)
692 		buflen = MAXPATHLEN;
693 
694 	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
695 	fdp = td->td_proc->p_fd;
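	/*
	 * The filedesc shared lock keeps fd_cdir and fd_rdir stable while
	 * the path is assembled.
	 */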
696 	FILEDESC_SLOCK(fdp);
697 	error = vn_fullpath1(td, fdp->fd_cdir, fdp->fd_rdir, tmpbuf,
698 	    &bp, buflen);
699 	FILEDESC_SUNLOCK(fdp);
700 
701 	if (!error) {
702 		if (bufseg == UIO_SYSSPACE)
703 			bcopy(bp, buf, strlen(bp) + 1);
704 		else
705 			error = copyout(bp, buf, strlen(bp) + 1);
706 	}
707 	free(tmpbuf, M_TEMP);
708 	return (error);
709 }
710 
711 /*
712  * Thus begins the fullpath magic.
713  */
714 
715 #undef STATNODE
716 #define STATNODE(name)							\
717 	static u_int name;						\
718 	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")
719 
720 static int disablefullpath;
721 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
722 	"Disable the vn_fullpath function");
723 
724 /* These count for kern___getcwd(), too. */
725 STATNODE(numfullpathcalls);
726 STATNODE(numfullpathfail1);
727 STATNODE(numfullpathfail2);
728 STATNODE(numfullpathfail4);
729 STATNODE(numfullpathfound);
730 
731 /*
732  * Retrieve the full filesystem path that corresponds to a vnode from the
733  * name cache (if available).
734  */
735 int
736 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
737 {
738 	char *buf;
739 	struct filedesc *fdp;
740 	int error;
741 
742 	if (disablefullpath)
743 		return (ENODEV);
744 	if (vn == NULL)
745 		return (EINVAL);
746 
747 	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
748 	fdp = td->td_proc->p_fd;
749 	FILEDESC_SLOCK(fdp);
750 	error = vn_fullpath1(td, vn, fdp->fd_rdir, buf, retbuf, MAXPATHLEN);
751 	FILEDESC_SUNLOCK(fdp);
752 
753 	if (!error)
754 		*freebuf = buf;
755 	else
756 		free(buf, M_TEMP);
757 	return (error);
758 }
759 
760 /*
761  * This function is similar to vn_fullpath, but it attempts to lookup the
762  * pathname relative to the global root mount point.  This is required for the
763  * auditing sub-system, as audited pathnames must be absolute, relative to the
764  * global root mount point.
765  */
766 int
767 vn_fullpath_global(struct thread *td, struct vnode *vn,
768     char **retbuf, char **freebuf)
769 {
770 	char *buf;
771 	int error;
772 
773 	if (disablefullpath)
774 		return (ENODEV);
775 	if (vn == NULL)
776 		return (EINVAL);
777 	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
778 	error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
779 	if (!error)
780 		*freebuf = buf;
781 	else
782 		free(buf, M_TEMP);
783 	return (error);
784 }
785 
786 /*
787  * The magic behind kern___getcwd() and vn_fullpath().
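 *
 * The path is assembled backwards from the end of buf: each vnode's name
 * is taken from the first entry on its v_cache_dst list and the walk
 * moves up via nc_dvp, crossing mount points through mnt_vnodecovered,
 * until rdir or the global root is reached.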
788  */
789 static int
790 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
791     char *buf, char **retbuf, u_int buflen)
792 {
793 	char *bp;
794 	int error, i, slash_prefixed;
795 	struct namecache *ncp;
796 
797 	bp = buf + buflen - 1;
798 	*bp = '\0';
799 	error = 0;
800 	slash_prefixed = 0;
801 
802 	CACHE_LOCK();
803 	numfullpathcalls++;
804 	if (vp->v_type != VDIR) {
805 		ncp = TAILQ_FIRST(&vp->v_cache_dst);
806 		if (!ncp) {
807 			numfullpathfail2++;
808 			CACHE_UNLOCK();
809 			return (ENOENT);
810 		}
811 		for (i = ncp->nc_nlen - 1; i >= 0 && bp > buf; i--)
812 			*--bp = ncp->nc_name[i];
813 		if (bp == buf) {
814 			numfullpathfail4++;
815 			CACHE_UNLOCK();
816 			return (ENOMEM);
817 		}
818 		*--bp = '/';
819 		slash_prefixed = 1;
820 		vp = ncp->nc_dvp;
821 	}
822 	while (vp != rdir && vp != rootvnode) {
823 		if (vp->v_vflag & VV_ROOT) {
824 			if (vp->v_iflag & VI_DOOMED) {	/* forced unmount */
825 				error = EBADF;
826 				break;
827 			}
828 			vp = vp->v_mount->mnt_vnodecovered;
829 			continue;
830 		}
831 		if (vp->v_dd == NULL) {
832 			numfullpathfail1++;
833 			error = ENOTDIR;
834 			break;
835 		}
836 		ncp = TAILQ_FIRST(&vp->v_cache_dst);
837 		if (!ncp) {
838 			numfullpathfail2++;
839 			error = ENOENT;
840 			break;
841 		}
842 		MPASS(ncp->nc_dvp == vp->v_dd);
843 		for (i = ncp->nc_nlen - 1; i >= 0 && bp != buf; i--)
844 			*--bp = ncp->nc_name[i];
845 		if (bp == buf) {
846 			numfullpathfail4++;
847 			error = ENOMEM;
848 			break;
849 		}
850 		*--bp = '/';
851 		slash_prefixed = 1;
852 		vp = ncp->nc_dvp;
853 	}
854 	if (error) {
855 		CACHE_UNLOCK();
856 		return (error);
857 	}
858 	if (!slash_prefixed) {
859 		if (bp == buf) {
860 			numfullpathfail4++;
861 			CACHE_UNLOCK();
862 			return (ENOMEM);
863 		} else {
864 			*--bp = '/';
865 		}
866 	}
867 	numfullpathfound++;
868 	CACHE_UNLOCK();
869 
870 	*retbuf = bp;
871 	return (0);
872 }
873 
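/*
 * Copy a cached name for the vnode into buf, NUL-terminated and truncated
 * to fit buflen; returns ENOENT if no name is cached for the vnode.
 */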
874 int
875 vn_commname(struct vnode *vp, char *buf, u_int buflen)
876 {
877 	struct namecache *ncp;
878 	int l;
879 
880 	CACHE_LOCK();
881 	ncp = TAILQ_FIRST(&vp->v_cache_dst);
882 	if (!ncp) {
883 		CACHE_UNLOCK();
884 		return (ENOENT);
885 	}
886 	l = min(ncp->nc_nlen, buflen - 1);
887 	memcpy(buf, ncp->nc_name, l);
888 	CACHE_UNLOCK();
889 	buf[l] = '\0';
890 	return (0);
891 }
892