xref: /freebsd/sys/kern/vfs_cache.c (revision 884a2a699669ec61e2366e3e358342dbc94be24a)
1 /*-
2  * Copyright (c) 1989, 1993, 1995
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Poul-Henning Kamp of the FreeBSD Project.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include "opt_kdtrace.h"
39 #include "opt_ktrace.h"
40 
41 #include <sys/param.h>
42 #include <sys/filedesc.h>
43 #include <sys/fnv_hash.h>
44 #include <sys/kernel.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/mount.h>
48 #include <sys/namei.h>
49 #include <sys/proc.h>
50 #include <sys/rwlock.h>
51 #include <sys/sdt.h>
52 #include <sys/syscallsubr.h>
53 #include <sys/sysctl.h>
54 #include <sys/sysproto.h>
55 #include <sys/systm.h>
56 #include <sys/vnode.h>
57 #ifdef KTRACE
58 #include <sys/ktrace.h>
59 #endif
60 
61 #include <vm/uma.h>
62 
63 SDT_PROVIDER_DECLARE(vfs);
64 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, done, "struct vnode *", "char *",
65     "struct vnode *");
66 SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, done, "struct vnode *",
67     "char *");
68 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, entry, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, hit, "struct vnode *",
    "char *", "struct vnode *");
71 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, miss, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, return, "int",
    "struct vnode *", "char *");
74 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, hit, "struct vnode *", "char *",
75     "struct vnode *");
76 SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit_negative, hit-negative,
77     "struct vnode *", "char *");
78 SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, miss, "struct vnode *",
79     "char *");
80 SDT_PROBE_DEFINE1(vfs, namecache, purge, done, done, "struct vnode *");
81 SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, done, "struct vnode *");
82 SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, done, "struct mount *");
83 SDT_PROBE_DEFINE3(vfs, namecache, zap, done, done, "struct vnode *", "char *",
84     "struct vnode *");
85 SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, done, "struct vnode *",
86     "char *");
87 
88 /*
89  * This structure describes the elements in the cache of recent
90  * names looked up by namei.
91  */
92 
93 struct	namecache {
94 	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
95 	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
96 	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
97 	struct	vnode *nc_dvp;		/* vnode of parent of name */
98 	struct	vnode *nc_vp;		/* vnode the name refers to */
99 	u_char	nc_flag;		/* flag bits */
100 	u_char	nc_nlen;		/* length of name */
101 	char	nc_name[0];		/* segment name + nul */
102 };
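
/*
 * The name is stored in a flexible array member at the end of the
 * structure, so a single allocation holds both.  For illustration (a
 * sketch; the actual storage comes from the fixed-size UMA zones set up
 * below), caching the name "kernel" needs at least
 *
 *	sizeof(struct namecache) + strlen("kernel") + 1
 *
 * bytes, with nc_nlen set to 6 and nc_name[] holding "kernel\0".
 */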
103 
/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  Only the negative entries are kept
 * on an LRU list; positive entries persist until they are
 * purged or zapped.  The cache is indexed by hash value
 * obtained from (vp, name), where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry (i.e. for a name that is known NOT to
 * exist), the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */
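
/*
 * Example (a sketch mirroring cache_lookup() and cache_enter() below):
 * the hash key for the name "kern" under directory vnode dvp is
 * computed by folding the name and then the directory vnode pointer
 * through FNV-1:
 *
 *	hash = fnv_32_buf("kern", 4, FNV1_32_INIT);
 *	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
 *
 * and the matching chain is NCHHASH(hash), i.e. &nchashtbl[hash & nchash].
 */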
120 
/*
 * Structures associated with name caching.
 */
124 #define NCHHASH(hash) \
125 	(&nchashtbl[(hash) & nchash])
126 static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;	/* Negative entry LRU list */
128 static u_long	nchash;			/* size of hash table */
129 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
130     "Size of namecache hash table");
131 static u_long	ncnegfactor = 16;	/* ratio of negative entries */
132 SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
133     "Ratio of negative namecache entries");
134 static u_long	numneg;			/* number of negative entries allocated */
135 SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
136     "Number of negative entries in namecache");
137 static u_long	numcache;		/* number of cache entries allocated */
138 SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
139     "Number of namecache entries");
140 static u_long	numcachehv;		/* number of cache entries with vnodes held */
141 SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0,
142     "Number of namecache entries with vnodes held");
143 static u_int	ncsizefactor = 2;
144 SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0,
145     "Size factor for namecache");
146 
147 struct nchstats	nchstats;		/* cache effectiveness statistics */
148 
149 static struct rwlock cache_lock;
150 RW_SYSINIT(vfscache, &cache_lock, "Name Cache");
151 
152 #define	CACHE_UPGRADE_LOCK()	rw_try_upgrade(&cache_lock)
153 #define	CACHE_RLOCK()		rw_rlock(&cache_lock)
154 #define	CACHE_RUNLOCK()		rw_runlock(&cache_lock)
155 #define	CACHE_WLOCK()		rw_wlock(&cache_lock)
156 #define	CACHE_WUNLOCK()		rw_wunlock(&cache_lock)
157 
158 /*
159  * UMA zones for the VFS cache.
160  *
161  * The small cache is used for entries with short names, which are the
162  * most common.  The large cache is used for entries which are too big to
163  * fit in the small cache.
164  */
165 static uma_zone_t cache_zone_small;
166 static uma_zone_t cache_zone_large;
167 
168 #define	CACHE_PATH_CUTOFF	35
169 #define	CACHE_ZONE_SMALL	(sizeof(struct namecache) + CACHE_PATH_CUTOFF \
170 				    + 1)
171 #define	CACHE_ZONE_LARGE	(sizeof(struct namecache) + NAME_MAX + 1)
172 
173 #define cache_alloc(len)	uma_zalloc(((len) <= CACHE_PATH_CUTOFF) ? \
174 	cache_zone_small : cache_zone_large, M_WAITOK)
175 #define cache_free(ncp)		do { \
176 	if (ncp != NULL) \
177 		uma_zfree(((ncp)->nc_nlen <= CACHE_PATH_CUTOFF) ? \
178 		    cache_zone_small : cache_zone_large, (ncp)); \
179 } while (0)
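
/*
 * Example (sketch): cache_alloc() picks a zone purely by name length.
 * A 6-character name such as "kernel" (<= CACHE_PATH_CUTOFF) is served
 * from cache_zone_small, while a 100-character name is served from
 * cache_zone_large, which is sized to fit any name up to NAME_MAX.
 */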
180 
181 static int	doingcache = 1;		/* 1 => enable the cache */
182 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0,
183     "VFS namecache enabled");
184 
185 /* Export size information to userland */
186 SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, 0,
187     sizeof(struct namecache), "sizeof(struct namecache)");
188 
189 /*
190  * The new name cache statistics
191  */
192 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0,
193     "Name cache statistics");
194 #define STATNODE(mode, name, var, descr) \
195 	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, descr);
196 STATNODE(CTLFLAG_RD, numneg, &numneg, "Number of negative cache entries");
197 STATNODE(CTLFLAG_RD, numcache, &numcache, "Number of cache entries");
198 static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls,
199     "Number of cache lookups");
200 static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits,
201     "Number of '.' hits");
202 static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits,
203     "Number of '..' hits");
204 static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks,
205     "Number of checks in lookup");
206 static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss,
207     "Number of cache misses");
208 static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap,
209     "Number of cache misses we do not want to cache");
210 static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps,
211     "Number of cache hits (positive) we do not want to cache");
212 static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits,
213     "Number of cache hits (positive)");
214 static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps,
215     "Number of cache hits (negative) we do not want to cache");
216 static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits,
217     "Number of cache hits (negative)");
218 static u_long numupgrades; STATNODE(CTLFLAG_RD, numupgrades, &numupgrades,
219     "Number of updates of the cache after lookup (write lock + retry)");
220 
221 SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD | CTLFLAG_MPSAFE,
222     &nchstats, sizeof(nchstats), "LU",
223     "VFS cache effectiveness statistics");
224 
227 static void cache_zap(struct namecache *ncp);
228 static int vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
229     u_int *buflen);
230 static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
231     char *buf, char **retbuf, u_int buflen);
232 
233 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
234 
235 /*
236  * Flags in namecache.nc_flag
237  */
238 #define NCF_WHITE	0x01
239 #define NCF_ISDOTDOT	0x02
240 
241 #ifdef DIAGNOSTIC
/*
 * Grab a snapshot of the name cache hash chain lengths (taken chain by
 * chain under the read lock, so not atomic across the whole table).
 */
245 SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, "hash table stats");
246 
247 static int
248 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
249 {
250 	int error;
251 	struct nchashhead *ncpp;
252 	struct namecache *ncp;
253 	int n_nchash;
254 	int count;
255 
256 	n_nchash = nchash + 1;	/* nchash is max index, not count */
257 	if (!req->oldptr)
		return (SYSCTL_OUT(req, 0, n_nchash * sizeof(int)));
259 
260 	/* Scan hash tables for applicable entries */
261 	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
262 		CACHE_RLOCK();
263 		count = 0;
264 		LIST_FOREACH(ncp, ncpp, nc_hash) {
265 			count++;
266 		}
267 		CACHE_RUNLOCK();
268 		error = SYSCTL_OUT(req, &count, sizeof(count));
269 		if (error)
270 			return (error);
271 	}
272 	return (0);
273 }
274 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
275     CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
276     "nchash chain lengths");
277 
278 static int
279 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
280 {
281 	int error;
282 	struct nchashhead *ncpp;
283 	struct namecache *ncp;
284 	int n_nchash;
285 	int count, maxlength, used, pct;
286 
287 	if (!req->oldptr)
		return (SYSCTL_OUT(req, 0, 4 * sizeof(int)));
289 
290 	n_nchash = nchash + 1;	/* nchash is max index, not count */
291 	used = 0;
292 	maxlength = 0;
293 
294 	/* Scan hash tables for applicable entries */
295 	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
296 		count = 0;
297 		CACHE_RLOCK();
298 		LIST_FOREACH(ncp, ncpp, nc_hash) {
299 			count++;
300 		}
301 		CACHE_RUNLOCK();
302 		if (count)
303 			used++;
304 		if (maxlength < count)
305 			maxlength = count;
306 	}
307 	n_nchash = nchash + 1;
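	/* pct is the fraction of chains in use, in hundredths of a percent */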
308 	pct = (used * 100 * 100) / n_nchash;
309 	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
310 	if (error)
311 		return (error);
312 	error = SYSCTL_OUT(req, &used, sizeof(used));
313 	if (error)
314 		return (error);
315 	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
316 	if (error)
317 		return (error);
318 	error = SYSCTL_OUT(req, &pct, sizeof(pct));
319 	if (error)
320 		return (error);
321 	return (0);
322 }
323 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
324     CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
325     "nchash chain lengths");
326 #endif
327 
328 /*
329  * cache_zap():
330  *
331  *   Removes a namecache entry from cache, whether it contains an actual
332  *   pointer to a vnode or if it is just a negative cache entry.
333  */
static void
cache_zap(struct namecache *ncp)
{
338 	struct vnode *vp;
339 
340 	rw_assert(&cache_lock, RA_WLOCKED);
341 	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp);
342 #ifdef KDTRACE_HOOKS
343 	if (ncp->nc_vp != NULL) {
344 		SDT_PROBE(vfs, namecache, zap, done, ncp->nc_dvp,
345 		    ncp->nc_name, ncp->nc_vp, 0, 0);
346 	} else {
347 		SDT_PROBE(vfs, namecache, zap_negative, done, ncp->nc_dvp,
348 		    ncp->nc_name, 0, 0, 0);
349 	}
350 #endif
351 	vp = NULL;
352 	LIST_REMOVE(ncp, nc_hash);
353 	if (ncp->nc_flag & NCF_ISDOTDOT) {
354 		if (ncp == ncp->nc_dvp->v_cache_dd)
355 			ncp->nc_dvp->v_cache_dd = NULL;
356 	} else {
357 		LIST_REMOVE(ncp, nc_src);
358 		if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
359 			vp = ncp->nc_dvp;
360 			numcachehv--;
361 		}
362 	}
363 	if (ncp->nc_vp) {
364 		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
365 		if (ncp == ncp->nc_vp->v_cache_dd)
366 			ncp->nc_vp->v_cache_dd = NULL;
367 	} else {
368 		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
369 		numneg--;
370 	}
371 	numcache--;
372 	cache_free(ncp);
373 	if (vp)
374 		vdrop(vp);
375 }
376 
377 /*
378  * Lookup an entry in the cache
379  *
380  * Lookup is called with dvp pointing to the directory to search,
381  * cnp pointing to the name of the entry being sought. If the lookup
382  * succeeds, the vnode is returned in *vpp, and a status of -1 is
383  * returned. If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned. If the lookup
385  * fails, a status of zero is returned.  If the directory vnode is
386  * recycled out from under us due to a forced unmount, a status of
387  * ENOENT is returned.
388  *
 * vpp is locked and ref'd on return if the lookup succeeds.  If we're
 * looking up DOTDOT, dvp is unlocked.  If we're looking up ".", an extra
 * ref is taken, but the lock is not recursively acquired.
392  */
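
/*
 * Consuming the tri-state return value, sketched after
 * vfs_cache_lookup() later in this file:
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	if (error == 0)			(miss: ask the filesystem)
 *		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
 *	if (error == -1)		(positive hit: *vpp locked and ref'd)
 *		return (0);
 *	return (error);			(ENOENT from a negative entry)
 */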
393 
int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
400 	struct namecache *ncp;
401 	uint32_t hash;
402 	int error, ltype, wlocked;
403 
404 	if (!doingcache) {
405 		cnp->cn_flags &= ~MAKEENTRY;
406 		return (0);
407 	}
408 retry:
409 	CACHE_RLOCK();
410 	wlocked = 0;
411 	numcalls++;
412 	error = 0;
413 
414 retry_wlocked:
415 	if (cnp->cn_nameptr[0] == '.') {
416 		if (cnp->cn_namelen == 1) {
417 			*vpp = dvp;
418 			CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
419 			    dvp, cnp->cn_nameptr);
420 			dothits++;
421 			SDT_PROBE(vfs, namecache, lookup, hit, dvp, ".",
422 			    *vpp, 0, 0);
423 			goto success;
424 		}
425 		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
426 			dotdothits++;
427 			if (dvp->v_cache_dd == NULL) {
428 				SDT_PROBE(vfs, namecache, lookup, miss, dvp,
429 				    "..", NULL, 0, 0);
430 				goto unlock;
431 			}
432 			if ((cnp->cn_flags & MAKEENTRY) == 0) {
433 				if (!wlocked && !CACHE_UPGRADE_LOCK())
434 					goto wlock;
435 				if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT)
436 					cache_zap(dvp->v_cache_dd);
437 				dvp->v_cache_dd = NULL;
438 				CACHE_WUNLOCK();
439 				return (0);
440 			}
441 			if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT)
442 				*vpp = dvp->v_cache_dd->nc_vp;
443 			else
444 				*vpp = dvp->v_cache_dd->nc_dvp;
445 			/* Return failure if negative entry was found. */
446 			if (*vpp == NULL) {
447 				ncp = dvp->v_cache_dd;
448 				goto negative_success;
449 			}
450 			CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
451 			    dvp, cnp->cn_nameptr, *vpp);
452 			SDT_PROBE(vfs, namecache, lookup, hit, dvp, "..",
453 			    *vpp, 0, 0);
454 			goto success;
455 		}
456 	}
457 
458 	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
459 	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
460 	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
461 		numchecks++;
462 		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
463 		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
464 			break;
465 	}
466 
467 	/* We failed to find an entry */
468 	if (ncp == NULL) {
469 		SDT_PROBE(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
470 		    NULL, 0, 0);
471 		if ((cnp->cn_flags & MAKEENTRY) == 0) {
472 			nummisszap++;
473 		} else {
474 			nummiss++;
475 		}
476 		nchstats.ncs_miss++;
477 		goto unlock;
478 	}
479 
480 	/* We don't want to have an entry, so dump it */
481 	if ((cnp->cn_flags & MAKEENTRY) == 0) {
482 		numposzaps++;
483 		nchstats.ncs_badhits++;
484 		if (!wlocked && !CACHE_UPGRADE_LOCK())
485 			goto wlock;
486 		cache_zap(ncp);
487 		CACHE_WUNLOCK();
488 		return (0);
489 	}
490 
491 	/* We found a "positive" match, return the vnode */
492 	if (ncp->nc_vp) {
493 		numposhits++;
494 		nchstats.ncs_goodhits++;
495 		*vpp = ncp->nc_vp;
496 		CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
497 		    dvp, cnp->cn_nameptr, *vpp, ncp);
498 		SDT_PROBE(vfs, namecache, lookup, hit, dvp, ncp->nc_name,
499 		    *vpp, 0, 0);
500 		goto success;
501 	}
502 
503 negative_success:
504 	/* We found a negative match, and want to create it, so purge */
505 	if (cnp->cn_nameiop == CREATE) {
506 		numnegzaps++;
507 		nchstats.ncs_badhits++;
508 		if (!wlocked && !CACHE_UPGRADE_LOCK())
509 			goto wlock;
510 		cache_zap(ncp);
511 		CACHE_WUNLOCK();
512 		return (0);
513 	}
514 
515 	if (!wlocked && !CACHE_UPGRADE_LOCK())
516 		goto wlock;
517 	numneghits++;
518 	/*
519 	 * We found a "negative" match, so we shift it to the end of
520 	 * the "negative" cache entries queue to satisfy LRU.  Also,
521 	 * check to see if the entry is a whiteout; indicate this to
522 	 * the componentname, if so.
523 	 */
524 	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
525 	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
526 	nchstats.ncs_neghits++;
527 	if (ncp->nc_flag & NCF_WHITE)
528 		cnp->cn_flags |= ISWHITEOUT;
529 	SDT_PROBE(vfs, namecache, lookup, hit_negative, dvp, ncp->nc_name,
530 	    0, 0, 0);
531 	CACHE_WUNLOCK();
532 	return (ENOENT);
533 
534 wlock:
535 	/*
536 	 * We need to update the cache after our lookup, so upgrade to
537 	 * a write lock and retry the operation.
538 	 */
539 	CACHE_RUNLOCK();
540 	CACHE_WLOCK();
541 	numupgrades++;
542 	wlocked = 1;
543 	goto retry_wlocked;
544 
545 success:
546 	/*
547 	 * On success we return a locked and ref'd vnode as per the lookup
548 	 * protocol.
549 	 */
550 	if (dvp == *vpp) {   /* lookup on "." */
551 		VREF(*vpp);
552 		if (wlocked)
553 			CACHE_WUNLOCK();
554 		else
555 			CACHE_RUNLOCK();
556 		/*
557 		 * When we lookup "." we still can be asked to lock it
558 		 * differently...
559 		 */
560 		ltype = cnp->cn_lkflags & LK_TYPE_MASK;
561 		if (ltype != VOP_ISLOCKED(*vpp)) {
562 			if (ltype == LK_EXCLUSIVE) {
563 				vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
564 				if ((*vpp)->v_iflag & VI_DOOMED) {
565 					/* forced unmount */
566 					vrele(*vpp);
567 					*vpp = NULL;
568 					return (ENOENT);
569 				}
570 			} else
571 				vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
572 		}
573 		return (-1);
574 	}
575 	ltype = 0;	/* silence gcc warning */
576 	if (cnp->cn_flags & ISDOTDOT) {
577 		ltype = VOP_ISLOCKED(dvp);
578 		VOP_UNLOCK(dvp, 0);
579 	}
580 	VI_LOCK(*vpp);
581 	if (wlocked)
582 		CACHE_WUNLOCK();
583 	else
584 		CACHE_RUNLOCK();
585 	error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
586 	if (cnp->cn_flags & ISDOTDOT) {
587 		vn_lock(dvp, ltype | LK_RETRY);
588 		if (dvp->v_iflag & VI_DOOMED) {
589 			if (error == 0)
590 				vput(*vpp);
591 			*vpp = NULL;
592 			return (ENOENT);
593 		}
594 	}
595 	if (error) {
596 		*vpp = NULL;
597 		goto retry;
598 	}
599 	if ((cnp->cn_flags & ISLASTCN) &&
600 	    (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
601 		ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
602 	}
603 	return (-1);
604 
605 unlock:
606 	if (wlocked)
607 		CACHE_WUNLOCK();
608 	else
609 		CACHE_RUNLOCK();
610 	return (0);
611 }
612 
613 /*
614  * Add an entry to the cache.
615  */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
622 	struct namecache *ncp, *n2;
623 	struct nchashhead *ncpp;
624 	uint32_t hash;
625 	int flag;
626 	int hold;
627 	int zap;
628 	int len;
629 
630 	CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
631 	VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
632 	    ("cache_enter: Adding a doomed vnode"));
633 	VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
634 	    ("cache_enter: Doomed vnode used as src"));
635 
636 	if (!doingcache)
637 		return;
638 
639 	/*
640 	 * Avoid blowout in namecache entries.
641 	 */
642 	if (numcache >= desiredvnodes * ncsizefactor)
643 		return;
644 
645 	flag = 0;
646 	if (cnp->cn_nameptr[0] == '.') {
647 		if (cnp->cn_namelen == 1)
648 			return;
649 		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
650 			CACHE_WLOCK();
651 			/*
652 			 * If dotdot entry already exists, just retarget it
653 			 * to new parent vnode, otherwise continue with new
654 			 * namecache entry allocation.
655 			 */
656 			if ((ncp = dvp->v_cache_dd) != NULL &&
657 			    ncp->nc_flag & NCF_ISDOTDOT) {
658 				KASSERT(ncp->nc_dvp == dvp,
659 				    ("wrong isdotdot parent"));
660 				if (ncp->nc_vp != NULL)
661 					TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst,
662 					    ncp, nc_dst);
663 				else
664 					TAILQ_REMOVE(&ncneg, ncp, nc_dst);
665 				if (vp != NULL)
666 					TAILQ_INSERT_HEAD(&vp->v_cache_dst,
667 					    ncp, nc_dst);
668 				else
669 					TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
670 				ncp->nc_vp = vp;
671 				CACHE_WUNLOCK();
672 				return;
673 			}
674 			dvp->v_cache_dd = NULL;
675 			SDT_PROBE(vfs, namecache, enter, done, dvp, "..", vp,
676 			    0, 0);
677 			CACHE_WUNLOCK();
678 			flag = NCF_ISDOTDOT;
679 		}
680 	}
681 
682 	hold = 0;
683 	zap = 0;
684 
685 	/*
686 	 * Calculate the hash key and setup as much of the new
687 	 * namecache entry as possible before acquiring the lock.
688 	 */
689 	ncp = cache_alloc(cnp->cn_namelen);
690 	ncp->nc_vp = vp;
691 	ncp->nc_dvp = dvp;
692 	ncp->nc_flag = flag;
693 	len = ncp->nc_nlen = cnp->cn_namelen;
694 	hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
695 	strlcpy(ncp->nc_name, cnp->cn_nameptr, len + 1);
696 	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
697 	CACHE_WLOCK();
698 
699 	/*
700 	 * See if this vnode or negative entry is already in the cache
701 	 * with this name.  This can happen with concurrent lookups of
702 	 * the same path name.
703 	 */
704 	ncpp = NCHHASH(hash);
705 	LIST_FOREACH(n2, ncpp, nc_hash) {
706 		if (n2->nc_dvp == dvp &&
707 		    n2->nc_nlen == cnp->cn_namelen &&
708 		    !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) {
709 			CACHE_WUNLOCK();
710 			cache_free(ncp);
711 			return;
712 		}
713 	}
714 
715 	if (flag == NCF_ISDOTDOT) {
716 		/*
717 		 * See if we are trying to add .. entry, but some other lookup
718 		 * has populated v_cache_dd pointer already.
719 		 */
		if (dvp->v_cache_dd != NULL) {
			CACHE_WUNLOCK();
			cache_free(ncp);
			return;
		}
725 		KASSERT(vp == NULL || vp->v_type == VDIR,
726 		    ("wrong vnode type %p", vp));
727 		dvp->v_cache_dd = ncp;
728 	}
729 
730 	numcache++;
731 	if (!vp) {
732 		numneg++;
733 		if (cnp->cn_flags & ISWHITEOUT)
734 			ncp->nc_flag |= NCF_WHITE;
735 	} else if (vp->v_type == VDIR) {
736 		if (flag != NCF_ISDOTDOT) {
737 			if ((n2 = vp->v_cache_dd) != NULL &&
738 			    (n2->nc_flag & NCF_ISDOTDOT) != 0)
739 				cache_zap(n2);
740 			vp->v_cache_dd = ncp;
741 		}
742 	} else {
743 		vp->v_cache_dd = NULL;
744 	}
745 
746 	/*
747 	 * Insert the new namecache entry into the appropriate chain
748 	 * within the cache entries table.
749 	 */
750 	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
751 	if (flag != NCF_ISDOTDOT) {
752 		if (LIST_EMPTY(&dvp->v_cache_src)) {
753 			hold = 1;
754 			numcachehv++;
755 		}
756 		LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
757 	}
758 
759 	/*
760 	 * If the entry is "negative", we place it into the
761 	 * "negative" cache queue, otherwise, we place it into the
762 	 * destination vnode's cache entries queue.
763 	 */
764 	if (vp) {
765 		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
766 		SDT_PROBE(vfs, namecache, enter, done, dvp, ncp->nc_name, vp,
767 		    0, 0);
768 	} else {
769 		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
770 		SDT_PROBE(vfs, namecache, enter_negative, done, dvp,
771 		    ncp->nc_name, 0, 0, 0);
772 	}
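	/*
	 * If negative entries now exceed 1/ncnegfactor of the whole
	 * cache, reclaim the oldest one from the head of the LRU list.
	 */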
773 	if (numneg * ncnegfactor > numcache) {
774 		ncp = TAILQ_FIRST(&ncneg);
775 		zap = 1;
776 	}
777 	if (hold)
778 		vhold(dvp);
779 	if (zap)
780 		cache_zap(ncp);
781 	CACHE_WUNLOCK();
782 }
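
/*
 * Typical call sites (a sketch; each filesystem decides when to call
 * this): after a successful lookup the filesystem may do
 *
 *	cache_enter(dvp, vp, cnp);
 *
 * and after a definitive ENOENT it may pass vp == NULL to create a
 * negative entry, letting the next lookup of that name fail without
 * calling into the filesystem at all.
 */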
783 
784 /*
785  * Name cache initialization, from vfs_init() when we are booting
786  */
787 static void
788 nchinit(void *dummy __unused)
789 {
790 
791 	TAILQ_INIT(&ncneg);
792 
793 	cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL, NULL,
794 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
795 	cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE, NULL,
796 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
797 
798 	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
799 }
800 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);
801 
802 
803 /*
804  * Invalidate all entries to a particular vnode.
805  */
void
cache_purge(struct vnode *vp)
{
810 
811 	CTR1(KTR_VFS, "cache_purge(%p)", vp);
812 	SDT_PROBE(vfs, namecache, purge, done, vp, 0, 0, 0, 0);
813 	CACHE_WLOCK();
814 	while (!LIST_EMPTY(&vp->v_cache_src))
815 		cache_zap(LIST_FIRST(&vp->v_cache_src));
816 	while (!TAILQ_EMPTY(&vp->v_cache_dst))
817 		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
818 	if (vp->v_cache_dd != NULL) {
819 		KASSERT(vp->v_cache_dd->nc_flag & NCF_ISDOTDOT,
820 		   ("lost dotdot link"));
821 		cache_zap(vp->v_cache_dd);
822 	}
823 	KASSERT(vp->v_cache_dd == NULL, ("incomplete purge"));
824 	CACHE_WUNLOCK();
825 }
826 
827 /*
828  * Invalidate all negative entries for a particular directory vnode.
829  */
void
cache_purge_negative(struct vnode *vp)
{
834 	struct namecache *cp, *ncp;
835 
836 	CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
837 	SDT_PROBE(vfs, namecache, purge_negative, done, vp, 0, 0, 0, 0);
838 	CACHE_WLOCK();
839 	LIST_FOREACH_SAFE(cp, &vp->v_cache_src, nc_src, ncp) {
840 		if (cp->nc_vp == NULL)
841 			cache_zap(cp);
842 	}
843 	CACHE_WUNLOCK();
844 }
845 
846 /*
847  * Flush all entries referencing a particular filesystem.
848  */
void
cache_purgevfs(struct mount *mp)
{
853 	struct nchashhead *ncpp;
854 	struct namecache *ncp, *nnp;
855 
856 	/* Scan hash tables for applicable entries */
857 	SDT_PROBE(vfs, namecache, purgevfs, done, mp, 0, 0, 0, 0);
858 	CACHE_WLOCK();
859 	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
860 		LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) {
861 			if (ncp->nc_dvp->v_mount == mp)
862 				cache_zap(ncp);
863 		}
864 	}
865 	CACHE_WUNLOCK();
866 }
867 
868 /*
869  * Perform canonical checks and cache lookup and pass on to filesystem
870  * through the vop_cachedlookup only if needed.
871  */
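
/*
 * A filesystem opts in by pointing vop_lookup at this function and
 * supplying its real lookup routine as vop_cachedlookup; e.g. a vop
 * vector would contain entries along the lines of (a sketch, with
 * xxx_lookup standing in for the filesystem's own routine):
 *
 *	.vop_lookup =		vfs_cache_lookup,
 *	.vop_cachedlookup =	xxx_lookup,
 */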
872 
int
vfs_cache_lookup(struct vop_lookup_args *ap)
{
881 	struct vnode *dvp;
882 	int error;
883 	struct vnode **vpp = ap->a_vpp;
884 	struct componentname *cnp = ap->a_cnp;
885 	struct ucred *cred = cnp->cn_cred;
886 	int flags = cnp->cn_flags;
887 	struct thread *td = cnp->cn_thread;
888 
889 	*vpp = NULL;
890 	dvp = ap->a_dvp;
891 
892 	if (dvp->v_type != VDIR)
893 		return (ENOTDIR);
894 
895 	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
896 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
897 		return (EROFS);
898 
899 	error = VOP_ACCESS(dvp, VEXEC, cred, td);
900 	if (error)
901 		return (error);
902 
903 	error = cache_lookup(dvp, vpp, cnp);
904 	if (error == 0)
905 		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
906 	if (error == -1)
907 		return (0);
908 	return (error);
909 }
910 
911 
912 #ifndef _SYS_SYSPROTO_H_
913 struct  __getcwd_args {
914 	u_char	*buf;
915 	u_int	buflen;
916 };
917 #endif
918 
919 /*
920  * XXX All of these sysctls would probably be more productive dead.
921  */
922 static int disablecwd;
923 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
924    "Disable the getcwd syscall");
925 
926 /* Implementation of the getcwd syscall. */
927 int
__getcwd(struct thread *td, struct __getcwd_args *uap)
{
932 
933 	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen));
934 }
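
/*
 * Userland reaches this via getcwd(3): libc tries the __getcwd()
 * syscall first and, if the kernel cannot reconstruct the path from
 * the name cache, falls back to walking ".." itself.  (A note on the
 * usual libc strategy, not something implemented in this file.)
 */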
935 
936 int
937 kern___getcwd(struct thread *td, u_char *buf, enum uio_seg bufseg, u_int buflen)
938 {
939 	char *bp, *tmpbuf;
940 	struct filedesc *fdp;
941 	struct vnode *cdir, *rdir;
942 	int error, vfslocked;
943 
944 	if (disablecwd)
945 		return (ENODEV);
946 	if (buflen < 2)
947 		return (EINVAL);
948 	if (buflen > MAXPATHLEN)
949 		buflen = MAXPATHLEN;
950 
951 	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
952 	fdp = td->td_proc->p_fd;
953 	FILEDESC_SLOCK(fdp);
954 	cdir = fdp->fd_cdir;
955 	VREF(cdir);
956 	rdir = fdp->fd_rdir;
957 	VREF(rdir);
958 	FILEDESC_SUNLOCK(fdp);
959 	error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen);
960 	vfslocked = VFS_LOCK_GIANT(rdir->v_mount);
961 	vrele(rdir);
962 	VFS_UNLOCK_GIANT(vfslocked);
963 	vfslocked = VFS_LOCK_GIANT(cdir->v_mount);
964 	vrele(cdir);
965 	VFS_UNLOCK_GIANT(vfslocked);
966 
967 	if (!error) {
968 		if (bufseg == UIO_SYSSPACE)
969 			bcopy(bp, buf, strlen(bp) + 1);
970 		else
971 			error = copyout(bp, buf, strlen(bp) + 1);
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(bp);
#endif
976 	}
977 	free(tmpbuf, M_TEMP);
978 	return (error);
979 }
980 
981 /*
982  * Thus begins the fullpath magic.
983  */
984 
985 #undef STATNODE
986 #define STATNODE(name, descr)						\
987 	static u_int name;						\
988 	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr)
989 
990 static int disablefullpath;
991 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
992     "Disable the vn_fullpath function");
993 
994 /* These count for kern___getcwd(), too. */
995 STATNODE(numfullpathcalls, "Number of fullpath search calls");
996 STATNODE(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)");
997 STATNODE(numfullpathfail2,
998     "Number of fullpath search errors (VOP_VPTOCNP failures)");
999 STATNODE(numfullpathfail4, "Number of fullpath search errors (ENOMEM)");
1000 STATNODE(numfullpathfound, "Number of successful fullpath calls");
1001 
1002 /*
 * Retrieve the full filesystem path that corresponds to a vnode from the name
1004  * cache (if available)
1005  */
1006 int
1007 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
1008 {
1009 	char *buf;
1010 	struct filedesc *fdp;
1011 	struct vnode *rdir;
1012 	int error, vfslocked;
1013 
1014 	if (disablefullpath)
1015 		return (ENODEV);
1016 	if (vn == NULL)
1017 		return (EINVAL);
1018 
1019 	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
1020 	fdp = td->td_proc->p_fd;
1021 	FILEDESC_SLOCK(fdp);
1022 	rdir = fdp->fd_rdir;
1023 	VREF(rdir);
1024 	FILEDESC_SUNLOCK(fdp);
1025 	error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN);
1026 	vfslocked = VFS_LOCK_GIANT(rdir->v_mount);
1027 	vrele(rdir);
1028 	VFS_UNLOCK_GIANT(vfslocked);
1029 
1030 	if (!error)
1031 		*freebuf = buf;
1032 	else
1033 		free(buf, M_TEMP);
1034 	return (error);
1035 }
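
/*
 * Typical use (a sketch; on success the buffer must be released with
 * free(9) and M_TEMP, since it is allocated above with
 * malloc(MAXPATHLEN, M_TEMP, M_WAITOK)):
 *
 *	char *fullpath, *freepath;
 *
 *	if (vn_fullpath(td, vp, &fullpath, &freepath) == 0) {
 *		printf("%s\n", fullpath);
 *		free(freepath, M_TEMP);
 *	}
 */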
1036 
1037 /*
1038  * This function is similar to vn_fullpath, but it attempts to lookup the
1039  * pathname relative to the global root mount point.  This is required for the
1040  * auditing sub-system, as audited pathnames must be absolute, relative to the
1041  * global root mount point.
1042  */
1043 int
1044 vn_fullpath_global(struct thread *td, struct vnode *vn,
1045     char **retbuf, char **freebuf)
1046 {
1047 	char *buf;
1048 	int error;
1049 
1050 	if (disablefullpath)
1051 		return (ENODEV);
1052 	if (vn == NULL)
1053 		return (EINVAL);
1054 	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
1055 	error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
1056 	if (!error)
1057 		*freebuf = buf;
1058 	else
1059 		free(buf, M_TEMP);
1060 	return (error);
1061 }
1062 
1063 int
1064 vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)
1065 {
1066 	int error;
1067 
1068 	CACHE_RLOCK();
1069 	error = vn_vptocnp_locked(vp, cred, buf, buflen);
1070 	if (error == 0) {
1071 		/*
1072 		 * vn_vptocnp_locked() dropped hold acquired by
1073 		 * VOP_VPTOCNP immediately after locking the
1074 		 * cache. Since we are going to drop the cache rlock,
1075 		 * re-hold the result.
1076 		 */
1077 		vhold(*vp);
1078 		CACHE_RUNLOCK();
1079 	}
1080 	return (error);
1081 }
1082 
1083 static int
1084 vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
1085     u_int *buflen)
1086 {
1087 	struct vnode *dvp;
1088 	struct namecache *ncp;
1089 	int error, vfslocked;
1090 
1091 	TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) {
1092 		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
1093 			break;
1094 	}
1095 	if (ncp != NULL) {
1096 		if (*buflen < ncp->nc_nlen) {
1097 			CACHE_RUNLOCK();
1098 			numfullpathfail4++;
1099 			error = ENOMEM;
1100 			SDT_PROBE(vfs, namecache, fullpath, return, error,
1101 			    vp, NULL, 0, 0);
1102 			return (error);
1103 		}
1104 		*buflen -= ncp->nc_nlen;
1105 		memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen);
1106 		SDT_PROBE(vfs, namecache, fullpath, hit, ncp->nc_dvp,
1107 		    ncp->nc_name, vp, 0, 0);
1108 		*vp = ncp->nc_dvp;
1109 		return (0);
1110 	}
1111 	SDT_PROBE(vfs, namecache, fullpath, miss, vp, 0, 0, 0, 0);
1112 
1113 	vhold(*vp);
1114 	CACHE_RUNLOCK();
1115 	vfslocked = VFS_LOCK_GIANT((*vp)->v_mount);
1116 	vn_lock(*vp, LK_SHARED | LK_RETRY);
1117 	error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
1118 	VOP_UNLOCK(*vp, 0);
1119 	vdrop(*vp);
1120 	VFS_UNLOCK_GIANT(vfslocked);
1121 	if (error) {
1122 		numfullpathfail2++;
		SDT_PROBE(vfs, namecache, fullpath, return, error, vp,
1124 		    NULL, 0, 0);
1125 		return (error);
1126 	}
1127 
1128 	*vp = dvp;
1129 	CACHE_RLOCK();
1130 	if ((*vp)->v_iflag & VI_DOOMED) {
1131 		/* forced unmount */
1132 		CACHE_RUNLOCK();
1133 		vdrop(*vp);
1134 		error = ENOENT;
1135 		SDT_PROBE(vfs, namecache, fullpath, return, error, vp,
1136 		    NULL, 0, 0);
1137 		return (error);
1138 	}
1139 	vdrop(*vp);
1140 
1141 	return (0);
1142 }
1143 
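/*
 * The path is assembled right to left: buflen starts at the end of the
 * buffer and each component returned by vn_vptocnp_locked() is copied
 * in front of what has been built so far, with a '/' prepended at each
 * step.  For example (a sketch), resolving a vnode under /usr/src
 * leaves the buffer looking like
 *
 *	[ ...unused... ]/usr/src\0
 *	                ^
 *	                *retbuf = buf + buflen
 */
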
1144 /*
1145  * The magic behind kern___getcwd() and vn_fullpath().
1146  */
1147 static int
1148 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
1149     char *buf, char **retbuf, u_int buflen)
1150 {
1151 	int error, slash_prefixed;
1152 #ifdef KDTRACE_HOOKS
1153 	struct vnode *startvp = vp;
1154 #endif
1155 
1156 	buflen--;
1157 	buf[buflen] = '\0';
1158 	error = 0;
1159 	slash_prefixed = 0;
1160 
1161 	SDT_PROBE(vfs, namecache, fullpath, entry, vp, 0, 0, 0, 0);
1162 	numfullpathcalls++;
1163 	CACHE_RLOCK();
1164 	if (vp->v_type != VDIR) {
1165 		error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen);
1166 		if (error)
1167 			return (error);
1168 		if (buflen == 0) {
1169 			CACHE_RUNLOCK();
1170 			return (ENOMEM);
1171 		}
1172 		buf[--buflen] = '/';
1173 		slash_prefixed = 1;
1174 	}
1175 	while (vp != rdir && vp != rootvnode) {
1176 		if (vp->v_vflag & VV_ROOT) {
1177 			if (vp->v_iflag & VI_DOOMED) {	/* forced unmount */
1178 				CACHE_RUNLOCK();
1179 				error = ENOENT;
1180 				SDT_PROBE(vfs, namecache, fullpath, return,
1181 				    error, vp, NULL, 0, 0);
1182 				break;
1183 			}
1184 			vp = vp->v_mount->mnt_vnodecovered;
1185 			continue;
1186 		}
1187 		if (vp->v_type != VDIR) {
1188 			CACHE_RUNLOCK();
1189 			numfullpathfail1++;
1190 			error = ENOTDIR;
1191 			SDT_PROBE(vfs, namecache, fullpath, return,
1192 			    error, vp, NULL, 0, 0);
1193 			break;
1194 		}
1195 		error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen);
1196 		if (error)
1197 			break;
1198 		if (buflen == 0) {
1199 			CACHE_RUNLOCK();
1200 			error = ENOMEM;
1201 			SDT_PROBE(vfs, namecache, fullpath, return, error,
1202 			    startvp, NULL, 0, 0);
1203 			break;
1204 		}
1205 		buf[--buflen] = '/';
1206 		slash_prefixed = 1;
1207 	}
1208 	if (error)
1209 		return (error);
1210 	if (!slash_prefixed) {
1211 		if (buflen == 0) {
1212 			CACHE_RUNLOCK();
1213 			numfullpathfail4++;
1214 			SDT_PROBE(vfs, namecache, fullpath, return, ENOMEM,
1215 			    startvp, NULL, 0, 0);
1216 			return (ENOMEM);
1217 		}
1218 		buf[--buflen] = '/';
1219 	}
1220 	numfullpathfound++;
1221 	CACHE_RUNLOCK();
1222 
1223 	SDT_PROBE(vfs, namecache, fullpath, return, 0, startvp, buf + buflen,
1224 	    0, 0);
1225 	*retbuf = buf + buflen;
1226 	return (0);
1227 }
1228 
1229 int
1230 vn_commname(struct vnode *vp, char *buf, u_int buflen)
1231 {
1232 	struct namecache *ncp;
1233 	int l;
1234 
1235 	CACHE_RLOCK();
1236 	TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst)
1237 		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
1238 			break;
1239 	if (ncp == NULL) {
1240 		CACHE_RUNLOCK();
1241 		return (ENOENT);
1242 	}
1243 	l = min(ncp->nc_nlen, buflen - 1);
1244 	memcpy(buf, ncp->nc_name, l);
1245 	CACHE_RUNLOCK();
1246 	buf[l] = '\0';
1247 	return (0);
1248 }
1249