/*-
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>

#include <vm/uma.h>

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct	namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct	vnode *nc_dvp;		/* vnode of parent of name */
	struct	vnode *nc_vp;		/* vnode the name refers to */
	u_char	nc_flag;		/* flag bits */
	u_char	nc_nlen;		/* length of name */
	char	nc_name[0];		/* segment name */
};

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  The cache is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by a hash value
 * obtained from (vp, name), where vp refers to the directory
 * containing the name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */
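
/*
 * Illustrative sketch (not a routine in this file) of how a filesystem
 * lookup typically drives the interfaces below; scan_directory() is a
 * hypothetical stand-in for the filesystem's own directory search:
 *
 *	error = cache_lookup(dvp, &vp, cnp);
 *	if (error == 0) {
 *		error = scan_directory(dvp, &vp, cnp);
 *		if (cnp->cn_flags & MAKEENTRY)
 *			cache_enter(dvp, error ? NULL : vp, cnp);
 *	}
 *
 * A return of -1 is a positive hit with *vpp set; ENOENT means a cached
 * negative entry; 0 means the cache had no answer.
 */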

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash) \
	(&nchashtbl[(hash) & nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;	/* LRU list of negative entries */
static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
static u_long	numneg;			/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
static u_long	numcachehv;		/* number of cache entries with vnodes held */
SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, "");
#if 0
static u_long	numcachepl;		/* number of cache purges for leaf entries */
SYSCTL_ULONG(_debug, OID_AUTO, numcachepl, CTLFLAG_RD, &numcachepl, 0, "");
#endif
struct	nchstats nchstats;		/* cache effectiveness statistics */

static struct mtx cache_lock;
MTX_SYSINIT(vfscache, &cache_lock, "Name Cache", MTX_DEF);

#define	CACHE_LOCK()	mtx_lock(&cache_lock)
#define	CACHE_UNLOCK()	mtx_unlock(&cache_lock)

/*
 * UMA zones for the VFS cache.
 *
 * The small cache is used for entries with short names, which are the
 * most common.  The large cache is used for entries which are too big to
 * fit in the small cache.
 */
static uma_zone_t cache_zone_small;
static uma_zone_t cache_zone_large;

#define	CACHE_PATH_CUTOFF	32
#define	CACHE_ZONE_SMALL	(sizeof(struct namecache) + CACHE_PATH_CUTOFF)
#define	CACHE_ZONE_LARGE	(sizeof(struct namecache) + NAME_MAX)

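/*
 * Pick the zone from the name length: names of at most CACHE_PATH_CUTOFF
 * bytes are stored inline in a small-zone entry, anything longer (up to
 * NAME_MAX) comes from the large zone.
 */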
#define cache_alloc(len)	uma_zalloc(((len) <= CACHE_PATH_CUTOFF) ? \
	cache_zone_small : cache_zone_large, M_WAITOK)
#define cache_free(ncp)		do { \
	if ((ncp) != NULL) \
		uma_zfree(((ncp)->nc_nlen <= CACHE_PATH_CUTOFF) ? \
		    cache_zone_small : cache_zone_large, (ncp)); \
} while (0)

static int	doingcache = 1;		/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");

/* Export size information to userland */
SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

/*
 * The new name cache statistics
 */
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD, &nchstats,
	sizeof(nchstats), "LU", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);
static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen);

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	1

/*
 * Grab an atomic snapshot of the name cache hash chain lengths
 */
SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, "hash table stats");

static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count;

	n_nchash = nchash + 1;	/* nchash is max index, not count */
	if (!req->oldptr)
		return (SYSCTL_OUT(req, 0, n_nchash * sizeof(int)));

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		error = SYSCTL_OUT(req, &count, sizeof(count));
		if (error)
			return (error);
	}
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_debug_hashstat_rawnchash, "S,int", "nchash chain lengths");

static int
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count, maxlength, used, pct;

	if (!req->oldptr)
		return (SYSCTL_OUT(req, 0, 4 * sizeof(int)));

	n_nchash = nchash + 1;	/* nchash is max index, not count */
	used = 0;
	maxlength = 0;

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		if (count)
			used++;
		if (maxlength < count)
			maxlength = count;
	}
	n_nchash = nchash + 1;
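	/* pct is utilization scaled by 100, i.e. hundredths of a percent. */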
	pct = (used * 100 * 100) / n_nchash;
	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &used, sizeof(used));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &pct, sizeof(pct));
	if (error)
		return (error);
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_debug_hashstat_nchash, "I", "nchash chain lengths");
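
/*
 * From userland these export as "sysctl debug.hashstat.rawnchash" (the
 * per-bucket chain lengths) and "sysctl debug.hashstat.nchash" (the
 * summary: bucket count, buckets used, longest chain, utilization).
 */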

/*
 * cache_zap():
 *
 *   Removes a namecache entry from cache, whether it contains an actual
 *   pointer to a vnode or if it is just a negative cache entry.
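 *
 *   The cache lock must be held on entry.  If this entry was the last
 *   one sourced from its parent directory, the parent's hold reference
 *   is released with vdrop() after the entry is freed.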
 */
static void
cache_zap(struct namecache *ncp)
{
	struct vnode *vp;

	mtx_assert(&cache_lock, MA_OWNED);
	vp = NULL;
	LIST_REMOVE(ncp, nc_hash);
	LIST_REMOVE(ncp, nc_src);
	if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
		vp = ncp->nc_dvp;
		numcachehv--;
	}
	if (ncp->nc_vp) {
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
	} else {
		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
		numneg--;
	}
	numcache--;
	cache_free(ncp);
	if (vp)
		vdrop(vp);
}

/*
 * cache_leaf_test()
 *
 *      Test whether this (directory) vnode's namei cache entry contains
 *      subdirectories or not.  Used to determine whether the directory is
 *      a leaf in the namei cache or not.  Note: the directory may still
 *      contain files in the namei cache.
 *
 *      Returns 0 if the directory is a leaf, -1 if it isn't.
 */
int
cache_leaf_test(struct vnode *vp)
{
	struct namecache *ncpc;
	int leaf;

	leaf = 0;
	CACHE_LOCK();
	LIST_FOREACH(ncpc, &vp->v_cache_src, nc_src) {
		if (ncpc->nc_vp != NULL && ncpc->nc_vp->v_type == VDIR) {
			leaf = -1;
			break;
		}
	}
	CACHE_UNLOCK();
	return (leaf);
}

/*
 * Lookup an entry in the cache
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp pointing to the name of the entry being sought. If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned. If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned. If the lookup
 * fails, a status of zero is returned.
 *
 * vpp is locked and ref'd on return.  If we're looking up DOTDOT, dvp is
 * unlocked.  If we're looking up ".", an extra ref is taken, but the
 * lock is not recursively acquired.
 */

int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct namecache *ncp;
	u_int32_t hash;
	int error;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}
retry:
	CACHE_LOCK();
	numcalls++;

	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			*vpp = dvp;
			dothits++;
			goto success;
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			dotdothits++;
			if (dvp->v_dd == NULL ||
			    (cnp->cn_flags & MAKEENTRY) == 0) {
				CACHE_UNLOCK();
				return (0);
			}
			*vpp = dvp->v_dd;
			goto success;
		}
	}

	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == NULL) {
		if ((cnp->cn_flags & MAKEENTRY) == 0) {
			nummisszap++;
		} else {
			nummiss++;
		}
		nchstats.ncs_miss++;
		CACHE_UNLOCK();
		return (0);
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		numposzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		CACHE_UNLOCK();
		return (0);
	}

	/* We found a "positive" match, return the vnode */
	if (ncp->nc_vp) {
		numposhits++;
		nchstats.ncs_goodhits++;
		*vpp = ncp->nc_vp;
		goto success;
	}

	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		numnegzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		CACHE_UNLOCK();
		return (0);
	}

	numneghits++;
	/*
	 * We found a "negative" match, so we shift it to the end of
	 * the "negative" cache entries queue to satisfy LRU.  Also,
	 * check to see if the entry is a whiteout; indicate this to
	 * the componentname, if so.
	 */
	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	nchstats.ncs_neghits++;
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	CACHE_UNLOCK();
	return (ENOENT);

success:
	/*
	 * On success we return a locked and ref'd vnode as per the lookup
	 * protocol.
	 */
	if (dvp == *vpp) {   /* lookup on "." */
		VREF(*vpp);
		CACHE_UNLOCK();
		return (-1);
	}
	if (cnp->cn_flags & ISDOTDOT)
		VOP_UNLOCK(dvp, 0, cnp->cn_thread);
	VI_LOCK(*vpp);
	CACHE_UNLOCK();
	error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
	if (cnp->cn_flags & ISDOTDOT)
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_thread);
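	/*
	 * If vget() failed, the vnode may have been reclaimed while the
	 * cache lock was dropped; retry from scratch, since the cache
	 * entry (and our *vpp) may no longer be valid.
	 */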
	if (error) {
		*vpp = NULL;
		goto retry;
	}
	return (-1);
}

/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	u_int32_t hash;
	int hold;
	int zap;
	int len;

	if (!doingcache)
		return;

	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			return;
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			dvp->v_dd = vp;
			return;
		}
	}

	hold = 0;
	zap = 0;
	ncp = cache_alloc(cnp->cn_namelen);
	CACHE_LOCK();
	numcache++;
	if (!vp) {
		numneg++;
		ncp->nc_flag = (cnp->cn_flags & ISWHITEOUT) ? NCF_WHITE : 0;
	} else if (vp->v_type == VDIR) {
		vp->v_dd = dvp;
	} else {
		vp->v_dd = NULL;
	}

	/*
	 * Set the rest of the namecache entry elements, calculate its
	 * hash key and insert it into the appropriate chain within
	 * the cache entries table.
	 */
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	len = ncp->nc_nlen = cnp->cn_namelen;
	hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
	bcopy(cnp->cn_nameptr, ncp->nc_name, len);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	ncpp = NCHHASH(hash);
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
	if (LIST_EMPTY(&dvp->v_cache_src)) {
		hold = 1;
		numcachehv++;
	}
	LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	/*
	 * If the entry is "negative", we place it into the
	 * "negative" cache queue, otherwise, we place it into the
	 * destination vnode's cache entries queue.
	 */
	if (vp) {
		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
	} else {
		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	}
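	/*
	 * Keep negative entries to at most 1/ncnegfactor of the cache by
	 * reclaiming the coldest negative entry (the head of the LRU
	 * queue) once the ratio is exceeded.
	 */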
	if (numneg * ncnegfactor > numcache) {
		ncp = TAILQ_FIRST(&ncneg);
		zap = 1;
	}
	if (hold)
		vhold(dvp);
	if (zap)
		cache_zap(ncp);
	CACHE_UNLOCK();
}

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
static void
nchinit(void *dummy __unused)
{

	TAILQ_INIT(&ncneg);

	cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL, NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE, NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

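	/*
	 * Size the table at roughly two hash buckets per expected vnode
	 * (hashinit() rounds the bucket count to a power of two and
	 * returns the mask in nchash).
	 */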
	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL)

/*
 * Invalidate all entries to a particular vnode.
 */
void
cache_purge(struct vnode *vp)
{
	struct namecache *ncp;

	CACHE_LOCK();
	while (!LIST_EMPTY(&vp->v_cache_src)) {
		struct vnode *cvp;

		ncp = LIST_FIRST(&vp->v_cache_src);
		/*
		 * We must reset v_dd of any children so they don't
		 * continue to point to us.
		 */
		if ((cvp = ncp->nc_vp) != NULL && cvp->v_dd == vp)
			cvp->v_dd = NULL;
		cache_zap(ncp);
	}
	while (!TAILQ_EMPTY(&vp->v_cache_dst))
		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
	vp->v_dd = NULL;
	CACHE_UNLOCK();
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct nchashhead *ncpp;
	struct namecache *ncp, *nnp;
	struct nchashhead mplist;

	LIST_INIT(&mplist);
	ncp = NULL;

	/* Scan hash tables for applicable entries */
	CACHE_LOCK();
	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
		for (ncp = LIST_FIRST(ncpp); ncp != NULL; ncp = nnp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (ncp->nc_dvp->v_mount == mp) {
				LIST_REMOVE(ncp, nc_hash);
				LIST_INSERT_HEAD(&mplist, ncp, nc_hash);
			}
		}
	}
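	/*
	 * The matching entries were unlinked from the hash table and
	 * re-linked onto the private mplist above, so cache_zap() (which
	 * unconditionally removes nc_hash) can now dispose of them
	 * without rescanning the table.
	 */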
	while (!LIST_EMPTY(&mplist))
		cache_zap(LIST_FIRST(&mplist));
	CACHE_UNLOCK();
}

/*
 * Perform canonical checks and cache lookup and pass on to filesystem
 * through the vop_cachedlookup only if needed.
 */

int
vfs_cache_lookup(struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap)
{
	struct vnode *dvp;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct thread *td = cnp->cn_thread;

	*vpp = NULL;
	dvp = ap->a_dvp;

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	error = VOP_ACCESS(dvp, VEXEC, cred, td);
	if (error)
		return (error);

	error = cache_lookup(dvp, vpp, cnp);
	if (error == 0)
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
	if (error == ENOENT)
		return (error);
	return (0);
}
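
/*
 * Filesystems opt in by installing vfs_cache_lookup() as their
 * vop_lookup method and supplying the real directory scan as
 * vop_cachedlookup; UFS, for example, pairs it with ufs_lookup().
 */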

#ifndef _SYS_SYSPROTO_H_
struct  __getcwd_args {
	u_char	*buf;
	u_int	buflen;
};
#endif

/*
 * XXX All of these sysctls would probably be more productive dead.
 */
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
   "Disable the getcwd syscall");

/* Implementation of the getcwd syscall */
int
__getcwd(struct thread *td, struct __getcwd_args *uap)
{

	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen));
}

int
kern___getcwd(struct thread *td, u_char *buf, enum uio_seg bufseg, u_int buflen)
{
	char *bp, *tmpbuf;
	struct filedesc *fdp;
	int error;

	if (disablecwd)
		return (ENODEV);
	if (buflen < 2)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	mtx_lock(&Giant);
	FILEDESC_LOCK(fdp);
	error = vn_fullpath1(td, fdp->fd_cdir, fdp->fd_rdir, tmpbuf,
	    &bp, buflen);
	FILEDESC_UNLOCK(fdp);
	mtx_unlock(&Giant);

	if (!error) {
		if (bufseg == UIO_SYSSPACE)
			bcopy(bp, buf, strlen(bp) + 1);
		else
			error = copyout(bp, buf, strlen(bp) + 1);
	}
	free(tmpbuf, M_TEMP);
	return (error);
}

/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
	"Disable the vn_fullpath function");

/* These count for kern___getcwd(), too. */
STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

/*
 * Retrieve the full filesystem path that corresponds to a vnode from the
 * name cache (if available).
 */
int
vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
{
	char *buf;
	struct filedesc *fdp;
	int error;

	if (disablefullpath)
		return (ENODEV);
	if (vn == NULL)
		return (EINVAL);

	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	mtx_lock(&Giant);
	FILEDESC_LOCK(fdp);
	error = vn_fullpath1(td, vn, fdp->fd_rdir, buf, retbuf, MAXPATHLEN);
	FILEDESC_UNLOCK(fdp);
	mtx_unlock(&Giant);

	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}

/*
 * The magic behind kern___getcwd() and vn_fullpath().
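 *
 * The path is assembled backwards: starting at the end of the caller's
 * buffer, each iteration prepends the current vnode's name (taken from
 * its first nc_dst namecache entry) and a '/', then steps up to the
 * parent, crossing mount points via mnt_vnodecovered, until rdir or
 * rootvnode is reached.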
 */
static int
vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen)
{
	char *bp;
	int error, i, slash_prefixed;
	struct namecache *ncp;

	mtx_assert(&Giant, MA_OWNED);

	bp = buf + buflen - 1;
	*bp = '\0';
	error = 0;
	slash_prefixed = 0;

	CACHE_LOCK();
	numfullpathcalls++;
	if (vp->v_type != VDIR) {
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!ncp) {
			numfullpathfail2++;
			CACHE_UNLOCK();
			return (ENOENT);
		}
		for (i = ncp->nc_nlen - 1; i >= 0 && bp > buf; i--)
			*--bp = ncp->nc_name[i];
		if (bp == buf) {
			numfullpathfail4++;
			CACHE_UNLOCK();
			return (ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		vp = ncp->nc_dvp;
	}
	while (vp != rdir && vp != rootvnode) {
		if (vp->v_vflag & VV_ROOT) {
			if (vp->v_mount == NULL) {	/* forced unmount */
				error = EBADF;
				break;
			}
			vp = vp->v_mount->mnt_vnodecovered;
			continue;
		}
		if (vp->v_dd == NULL) {
			numfullpathfail1++;
			error = ENOTDIR;
			break;
		}
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!ncp) {
			numfullpathfail2++;
			error = ENOENT;
			break;
		}
		MPASS(ncp->nc_dvp == vp->v_dd);
		for (i = ncp->nc_nlen - 1; i >= 0 && bp != buf; i--)
			*--bp = ncp->nc_name[i];
		if (bp == buf) {
			numfullpathfail4++;
			error = ENOMEM;
			break;
		}
		*--bp = '/';
		slash_prefixed = 1;
		vp = ncp->nc_dvp;
	}
	if (error) {
		CACHE_UNLOCK();
		return (error);
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			CACHE_UNLOCK();
			return (ENOMEM);
		} else {
			*--bp = '/';
		}
	}
	numfullpathfound++;
	CACHE_UNLOCK();

	*retbuf = bp;
	return (0);
}