xref: /freebsd/sys/kern/vfs_cache.c (revision a3cf0ef5a295c885c895fabfd56470c0d1db322d)
1 /*-
2  * Copyright (c) 1989, 1993, 1995
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Poul-Henning Kamp of the FreeBSD Project.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include "opt_kdtrace.h"
39 #include "opt_ktrace.h"
40 
41 #include <sys/param.h>
42 #include <sys/filedesc.h>
43 #include <sys/fnv_hash.h>
44 #include <sys/kernel.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/mount.h>
48 #include <sys/namei.h>
49 #include <sys/proc.h>
50 #include <sys/rwlock.h>
51 #include <sys/sdt.h>
52 #include <sys/syscallsubr.h>
53 #include <sys/sysctl.h>
54 #include <sys/sysproto.h>
55 #include <sys/systm.h>
56 #include <sys/vnode.h>
57 #ifdef KTRACE
58 #include <sys/ktrace.h>
59 #endif
60 
61 #include <vm/uma.h>
62 
63 SDT_PROVIDER_DECLARE(vfs);
64 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, done, "struct vnode *", "char *",
65     "struct vnode *");
66 SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, done, "struct vnode *",
67     "char *");
68 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, entry, "struct vnode *");
69 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, hit, "struct vnode *",
70     "struct char *", "struct vnode *");
71 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, miss, "struct vnode *");
72 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, return, "int",
73     "struct vnode *", "struct char *");
74 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, hit, "struct vnode *", "char *",
75     "struct vnode *");
76 SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit_negative, hit-negative,
77     "struct vnode *", "char *");
78 SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, miss, "struct vnode *",
79     "char *");
80 SDT_PROBE_DEFINE1(vfs, namecache, purge, done, done, "struct vnode *");
81 SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, done, "struct vnode *");
82 SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, done, "struct mount *");
83 SDT_PROBE_DEFINE3(vfs, namecache, zap, done, done, "struct vnode *", "char *",
84     "struct vnode *");
85 SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, done, "struct vnode *",
86     "char *");
87 
88 /*
89  * This structure describes the elements in the cache of recent
90  * names looked up by namei.
91  */
92 
93 struct	namecache {
94 	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
95 	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
96 	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
97 	struct	vnode *nc_dvp;		/* vnode of parent of name */
98 	struct	vnode *nc_vp;		/* vnode the name refers to */
99 	u_char	nc_flag;		/* flag bits */
100 	u_char	nc_nlen;		/* length of name */
101 	char	nc_name[0];		/* segment name + nul */
102 };
103 
104 /*
105  * Name caching works as follows:
106  *
107  * Names found by directory scans are retained in a cache
108  * for future reference.  It is managed LRU, so frequently
109  * used names will hang around.  The cache is indexed by a hash value
110  * obtained from (vp, name), where vp refers to the directory
111  * containing name.
112  *
113  * If it is a "negative" entry (i.e. for a name that is known NOT to
114  * exist), the vnode pointer will be NULL.
115  *
116  * Upon reaching the last segment of a path, if the reference
117  * is for DELETE, or NOCACHE is set (rewrite), and the
118  * name is located in the cache, it will be dropped.
119  */
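
/*
 * As an illustration of the indexing scheme (this just restates what
 * cache_lookup() and cache_enter() below actually do), the hash for a
 * (directory vnode, name) pair is built by chaining two fnv_32_buf()
 * passes, one over the name and one over the directory vnode pointer,
 * and then masking with nchash:
 *
 *	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
 *	ncpp = NCHHASH(hash);	i.e. &nchashtbl[hash & nchash]
 */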
120 
121 /*
122  * Structures associated with name cacheing.
123  */
124 #define NCHHASH(hash) \
125 	(&nchashtbl[(hash) & nchash])
126 static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
127 static TAILQ_HEAD(, namecache) ncneg;	/* LRU list of negative entries */
128 static u_long	nchash;			/* size of hash table */
129 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
130     "Size of namecache hash table");
131 static u_long	ncnegfactor = 16;	/* ratio of negative entries */
132 SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
133     "Ratio of negative namecache entries");
134 static u_long	numneg;			/* number of negative entries allocated */
135 SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
136     "Number of negative entries in namecache");
137 static u_long	numcache;		/* number of cache entries allocated */
138 SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
139     "Number of namecache entries");
140 static u_long	numcachehv;		/* number of cache entries with vnodes held */
141 SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0,
142     "Number of namecache entries with vnodes held");
143 static u_int	ncsizefactor = 2;
144 SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0,
145     "Size factor for namecache");
146 
147 struct nchstats	nchstats;		/* cache effectiveness statistics */
148 
149 static struct rwlock cache_lock;
150 RW_SYSINIT(vfscache, &cache_lock, "Name Cache");
151 
152 #define	CACHE_UPGRADE_LOCK()	rw_try_upgrade(&cache_lock)
153 #define	CACHE_RLOCK()		rw_rlock(&cache_lock)
154 #define	CACHE_RUNLOCK()		rw_runlock(&cache_lock)
155 #define	CACHE_WLOCK()		rw_wlock(&cache_lock)
156 #define	CACHE_WUNLOCK()		rw_wunlock(&cache_lock)
157 
158 /*
159  * UMA zones for the VFS cache.
160  *
161  * The small cache is used for entries with short names, which are the
162  * most common.  The large cache is used for entries which are too big to
163  * fit in the small cache.
164  */
165 static uma_zone_t cache_zone_small;
166 static uma_zone_t cache_zone_large;
167 
168 #define	CACHE_PATH_CUTOFF	35
169 #define	CACHE_ZONE_SMALL	(sizeof(struct namecache) + CACHE_PATH_CUTOFF \
170 				    + 1)
171 #define	CACHE_ZONE_LARGE	(sizeof(struct namecache) + NAME_MAX + 1)
172 
173 #define cache_alloc(len)	uma_zalloc(((len) <= CACHE_PATH_CUTOFF) ? \
174 	cache_zone_small : cache_zone_large, M_WAITOK)
175 #define cache_free(ncp)		do { \
176 	if (ncp != NULL) \
177 		uma_zfree(((ncp)->nc_nlen <= CACHE_PATH_CUTOFF) ? \
178 		    cache_zone_small : cache_zone_large, (ncp)); \
179 } while (0)
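
/*
 * Sizing example (illustrative): with CACHE_PATH_CUTOFF at 35, a short
 * component such as "libc.so.7" (9 bytes) is carved from cache_zone_small,
 * whose items are sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1 bytes
 * (room for a 35-byte name plus nul).  A 100-byte component exceeds the
 * cutoff and comes from cache_zone_large, sized for the NAME_MAX (255)
 * worst case.
 */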
180 
181 static int	doingcache = 1;		/* 1 => enable the cache */
182 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0,
183     "VFS namecache enabled");
184 
185 /* Export size information to userland */
186 SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, 0,
187 	sizeof(struct namecache), "");
188 
189 /*
190  * The new name cache statistics
191  */
192 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
193 #define STATNODE(mode, name, var) \
194 	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
195 STATNODE(CTLFLAG_RD, numneg, &numneg);
196 STATNODE(CTLFLAG_RD, numcache, &numcache);
197 static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
198 static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
199 static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
200 static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
201 static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
202 static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
203 static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
204 static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
205 static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
206 static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);
207 static u_long numupgrades; STATNODE(CTLFLAG_RD, numupgrades, &numupgrades);
208 
209 SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD | CTLFLAG_MPSAFE,
210 	&nchstats, sizeof(nchstats), "LU", "VFS cache effectiveness statistics");
211 
214 static void cache_zap(struct namecache *ncp);
215 static int vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
216     u_int *buflen);
217 static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
218     char *buf, char **retbuf, u_int buflen);
219 
220 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
221 
222 /*
223  * Flags in namecache.nc_flag
224  */
225 #define NCF_WHITE	0x01
226 #define NCF_ISDOTDOT	0x02
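
/*
 * NCF_WHITE marks an entry created for a whiteout (a hit on it sets
 * ISWHITEOUT in the componentname), and NCF_ISDOTDOT marks the single ".."
 * entry that is also reachable through the directory vnode's v_cache_dd
 * shortcut.
 */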
227 
228 #ifdef DIAGNOSTIC
229 /*
230  * Grab an atomic snapshot of the name cache hash chain lengths
231  */
232 SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, "hash table stats");
233 
234 static int
235 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
236 {
237 	int error;
238 	struct nchashhead *ncpp;
239 	struct namecache *ncp;
240 	int n_nchash;
241 	int count;
242 
243 	n_nchash = nchash + 1;	/* nchash is max index, not count */
244 	if (!req->oldptr)
245 		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
246 
247 	/* Scan hash tables for applicable entries */
248 	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
249 		CACHE_RLOCK();
250 		count = 0;
251 		LIST_FOREACH(ncp, ncpp, nc_hash) {
252 			count++;
253 		}
254 		CACHE_RUNLOCK();
255 		error = SYSCTL_OUT(req, &count, sizeof(count));
256 		if (error)
257 			return (error);
258 	}
259 	return (0);
260 }
261 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
262 	CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
263 	"nchash chain lengths");
264 
265 static int
266 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
267 {
268 	int error;
269 	struct nchashhead *ncpp;
270 	struct namecache *ncp;
271 	int n_nchash;
272 	int count, maxlength, used, pct;
273 
274 	if (!req->oldptr)
275 		return SYSCTL_OUT(req, 0, 4 * sizeof(int));
276 
277 	n_nchash = nchash + 1;	/* nchash is max index, not count */
278 	used = 0;
279 	maxlength = 0;
280 
281 	/* Scan hash tables for applicable entries */
282 	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
283 		count = 0;
284 		CACHE_RLOCK();
285 		LIST_FOREACH(ncp, ncpp, nc_hash) {
286 			count++;
287 		}
288 		CACHE_RUNLOCK();
289 		if (count)
290 			used++;
291 		if (maxlength < count)
292 			maxlength = count;
293 	}
294 	n_nchash = nchash + 1;
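	/* Utilization is reported in hundredths of a percent (100 * 100). */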
295 	pct = (used * 100 * 100) / n_nchash;
296 	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
297 	if (error)
298 		return (error);
299 	error = SYSCTL_OUT(req, &used, sizeof(used));
300 	if (error)
301 		return (error);
302 	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
303 	if (error)
304 		return (error);
305 	error = SYSCTL_OUT(req, &pct, sizeof(pct));
306 	if (error)
307 		return (error);
308 	return (0);
309 }
310 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
311 	CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
312 	"nchash chain lengths");
313 #endif
314 
315 /*
316  * cache_zap():
317  *
318  *   Removes a namecache entry from the cache, whether it contains an
319  *   actual pointer to a vnode or is just a negative cache entry.
320  */
321 static void
322 cache_zap(ncp)
323 	struct namecache *ncp;
324 {
325 	struct vnode *vp;
326 
327 	rw_assert(&cache_lock, RA_WLOCKED);
328 	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp);
329 #ifdef KDTRACE_HOOKS
330 	if (ncp->nc_vp != NULL) {
331 		SDT_PROBE(vfs, namecache, zap, done, ncp->nc_dvp,
332 		    ncp->nc_name, ncp->nc_vp, 0, 0);
333 	} else {
334 		SDT_PROBE(vfs, namecache, zap_negative, done, ncp->nc_dvp,
335 		    ncp->nc_name, 0, 0, 0);
336 	}
337 #endif
338 	vp = NULL;
339 	LIST_REMOVE(ncp, nc_hash);
340 	if (ncp->nc_flag & NCF_ISDOTDOT) {
341 		if (ncp == ncp->nc_dvp->v_cache_dd)
342 			ncp->nc_dvp->v_cache_dd = NULL;
343 	} else {
344 		LIST_REMOVE(ncp, nc_src);
345 		if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
346 			vp = ncp->nc_dvp;
347 			numcachehv--;
348 		}
349 	}
350 	if (ncp->nc_vp) {
351 		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
352 		if (ncp == ncp->nc_vp->v_cache_dd)
353 			ncp->nc_vp->v_cache_dd = NULL;
354 	} else {
355 		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
356 		numneg--;
357 	}
358 	numcache--;
359 	cache_free(ncp);
360 	if (vp)
361 		vdrop(vp);
362 }
363 
364 /*
365  * Lookup an entry in the cache
366  *
367  * Lookup is called with dvp pointing to the directory to search,
368  * cnp pointing to the name of the entry being sought. If the lookup
369  * succeeds, the vnode is returned in *vpp, and a status of -1 is
370  * returned. If the lookup determines that the name does not exist
371  * (negative caching), a status of ENOENT is returned. If the lookup
372  * fails, a status of zero is returned.  If the directory vnode is
373  * recycled out from under us due to a forced unmount, a status of
374  * ENOENT is returned.
375  *
376  * vpp is locked and ref'd on return.  If we're looking up DOTDOT, dvp is
377  * unlocked.  If we're looking up ".", an extra ref is taken, but the lock is
378  * not recursively acquired.
379  */
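
/*
 * Caller-side sketch of the return convention (this mirrors how
 * vfs_cache_lookup() below consumes it):
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	error == -1	-> hit: *vpp is locked and referenced
 *	error == ENOENT	-> cached negative entry, name known not to exist
 *	error == 0	-> miss: fall back to VOP_CACHEDLOOKUP()
 */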
380 
381 int
382 cache_lookup(dvp, vpp, cnp)
383 	struct vnode *dvp;
384 	struct vnode **vpp;
385 	struct componentname *cnp;
386 {
387 	struct namecache *ncp;
388 	uint32_t hash;
389 	int error, ltype, wlocked;
390 
391 	if (!doingcache) {
392 		cnp->cn_flags &= ~MAKEENTRY;
393 		return (0);
394 	}
395 retry:
396 	CACHE_RLOCK();
397 	wlocked = 0;
398 	numcalls++;
399 	error = 0;
400 
401 retry_wlocked:
402 	if (cnp->cn_nameptr[0] == '.') {
403 		if (cnp->cn_namelen == 1) {
404 			*vpp = dvp;
405 			CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
406 			    dvp, cnp->cn_nameptr);
407 			dothits++;
408 			SDT_PROBE(vfs, namecache, lookup, hit, dvp, ".",
409 			    *vpp, 0, 0);
410 			goto success;
411 		}
412 		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
413 			dotdothits++;
414 			if (dvp->v_cache_dd == NULL) {
415 				SDT_PROBE(vfs, namecache, lookup, miss, dvp,
416 				    "..", NULL, 0, 0);
417 				goto unlock;
418 			}
419 			if ((cnp->cn_flags & MAKEENTRY) == 0) {
420 				if (!wlocked && !CACHE_UPGRADE_LOCK())
421 					goto wlock;
422 				if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT)
423 					cache_zap(dvp->v_cache_dd);
424 				dvp->v_cache_dd = NULL;
425 				CACHE_WUNLOCK();
426 				return (0);
427 			}
428 			if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT)
429 				*vpp = dvp->v_cache_dd->nc_vp;
430 			else
431 				*vpp = dvp->v_cache_dd->nc_dvp;
432 			/* Return failure if negative entry was found. */
433 			if (*vpp == NULL) {
434 				ncp = dvp->v_cache_dd;
435 				goto negative_success;
436 			}
437 			CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
438 			    dvp, cnp->cn_nameptr, *vpp);
439 			SDT_PROBE(vfs, namecache, lookup, hit, dvp, "..",
440 			    *vpp, 0, 0);
441 			goto success;
442 		}
443 	}
444 
445 	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
446 	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
447 	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
448 		numchecks++;
449 		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
450 		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
451 			break;
452 	}
453 
454 	/* We failed to find an entry */
455 	if (ncp == NULL) {
456 		SDT_PROBE(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
457 		    NULL, 0, 0);
458 		if ((cnp->cn_flags & MAKEENTRY) == 0) {
459 			nummisszap++;
460 		} else {
461 			nummiss++;
462 		}
463 		nchstats.ncs_miss++;
464 		goto unlock;
465 	}
466 
467 	/* We don't want to have an entry, so dump it */
468 	if ((cnp->cn_flags & MAKEENTRY) == 0) {
469 		numposzaps++;
470 		nchstats.ncs_badhits++;
471 		if (!wlocked && !CACHE_UPGRADE_LOCK())
472 			goto wlock;
473 		cache_zap(ncp);
474 		CACHE_WUNLOCK();
475 		return (0);
476 	}
477 
478 	/* We found a "positive" match, return the vnode */
479 	if (ncp->nc_vp) {
480 		numposhits++;
481 		nchstats.ncs_goodhits++;
482 		*vpp = ncp->nc_vp;
483 		CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
484 		    dvp, cnp->cn_nameptr, *vpp, ncp);
485 		SDT_PROBE(vfs, namecache, lookup, hit, dvp, ncp->nc_name,
486 		    *vpp, 0, 0);
487 		goto success;
488 	}
489 
490 negative_success:
491 	/* We found a negative match, and want to create it, so purge */
492 	if (cnp->cn_nameiop == CREATE) {
493 		numnegzaps++;
494 		nchstats.ncs_badhits++;
495 		if (!wlocked && !CACHE_UPGRADE_LOCK())
496 			goto wlock;
497 		cache_zap(ncp);
498 		CACHE_WUNLOCK();
499 		return (0);
500 	}
501 
502 	if (!wlocked && !CACHE_UPGRADE_LOCK())
503 		goto wlock;
504 	numneghits++;
505 	/*
506 	 * We found a "negative" match, so we shift it to the end of
507 	 * the "negative" cache entries queue to satisfy LRU.  Also,
508 	 * check to see if the entry is a whiteout; if so, indicate
509 	 * this to the componentname.
510 	 */
511 	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
512 	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
513 	nchstats.ncs_neghits++;
514 	if (ncp->nc_flag & NCF_WHITE)
515 		cnp->cn_flags |= ISWHITEOUT;
516 	SDT_PROBE(vfs, namecache, lookup, hit_negative, dvp, ncp->nc_name,
517 	    0, 0, 0);
518 	CACHE_WUNLOCK();
519 	return (ENOENT);
520 
521 wlock:
522 	/*
523 	 * We need to update the cache after our lookup, so upgrade to
524 	 * a write lock and retry the operation.
525 	 */
526 	CACHE_RUNLOCK();
527 	CACHE_WLOCK();
528 	numupgrades++;
529 	wlocked = 1;
530 	goto retry_wlocked;
531 
532 success:
533 	/*
534 	 * On success we return a locked and ref'd vnode as per the lookup
535 	 * protocol.
536 	 */
537 	if (dvp == *vpp) {   /* lookup on "." */
538 		VREF(*vpp);
539 		if (wlocked)
540 			CACHE_WUNLOCK();
541 		else
542 			CACHE_RUNLOCK();
543 		/*
544 		 * When we look up "." we can still be asked to lock it
545 		 * differently...
546 		 */
547 		ltype = cnp->cn_lkflags & LK_TYPE_MASK;
548 		if (ltype != VOP_ISLOCKED(*vpp)) {
549 			if (ltype == LK_EXCLUSIVE) {
550 				vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
551 				if ((*vpp)->v_iflag & VI_DOOMED) {
552 					/* forced unmount */
553 					vrele(*vpp);
554 					*vpp = NULL;
555 					return (ENOENT);
556 				}
557 			} else
558 				vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
559 		}
560 		return (-1);
561 	}
562 	ltype = 0;	/* silence gcc warning */
563 	if (cnp->cn_flags & ISDOTDOT) {
564 		ltype = VOP_ISLOCKED(dvp);
565 		VOP_UNLOCK(dvp, 0);
566 	}
567 	VI_LOCK(*vpp);
568 	if (wlocked)
569 		CACHE_WUNLOCK();
570 	else
571 		CACHE_RUNLOCK();
572 	error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
573 	if (cnp->cn_flags & ISDOTDOT) {
574 		vn_lock(dvp, ltype | LK_RETRY);
575 		if (dvp->v_iflag & VI_DOOMED) {
576 			if (error == 0)
577 				vput(*vpp);
578 			*vpp = NULL;
579 			return (ENOENT);
580 		}
581 	}
582 	if (error) {
583 		*vpp = NULL;
584 		goto retry;
585 	}
586 	if ((cnp->cn_flags & ISLASTCN) &&
587 	    (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
588 		ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
589 	}
590 	return (-1);
591 
592 unlock:
593 	if (wlocked)
594 		CACHE_WUNLOCK();
595 	else
596 		CACHE_RUNLOCK();
597 	return (0);
598 }
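
/*
 * cache_enter(), below, is what fills the cache.  A typical call site
 * sketch (exact usage varies by filesystem) from a VOP_CACHEDLOOKUP
 * implementation, once a component has been resolved:
 *
 *	if (cnp->cn_flags & MAKEENTRY)
 *		cache_enter(dvp, *vpp, cnp);
 *
 * Passing vp == NULL records a negative entry for a name that was not
 * found, if the filesystem opts into negative caching.
 */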
599 
600 /*
601  * Add an entry to the cache.
602  */
603 void
604 cache_enter(dvp, vp, cnp)
605 	struct vnode *dvp;
606 	struct vnode *vp;
607 	struct componentname *cnp;
608 {
609 	struct namecache *ncp, *n2;
610 	struct nchashhead *ncpp;
611 	uint32_t hash;
612 	int flag;
613 	int hold;
614 	int zap;
615 	int len;
616 
617 	CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
618 	VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
619 	    ("cache_enter: Adding a doomed vnode"));
620 	VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
621 	    ("cache_enter: Doomed vnode used as src"));
622 
623 	if (!doingcache)
624 		return;
625 
626 	/*
627 	 * Avoid blowout in namecache entries.
628 	 */
629 	if (numcache >= desiredvnodes * ncsizefactor)
630 		return;
631 
632 	flag = 0;
633 	if (cnp->cn_nameptr[0] == '.') {
634 		if (cnp->cn_namelen == 1)
635 			return;
636 		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
637 			CACHE_WLOCK();
638 			/*
639 			 * If a dotdot entry already exists, just retarget it
640 			 * to the new parent vnode; otherwise continue with a
641 			 * new namecache entry allocation.
642 			 */
643 			if ((ncp = dvp->v_cache_dd) != NULL &&
644 			    ncp->nc_flag & NCF_ISDOTDOT) {
645 				KASSERT(ncp->nc_dvp == dvp,
646 				    ("wrong isdotdot parent"));
647 				if (ncp->nc_vp != NULL)
648 					TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst,
649 					    ncp, nc_dst);
650 				else
651 					TAILQ_REMOVE(&ncneg, ncp, nc_dst);
652 				if (vp != NULL)
653 					TAILQ_INSERT_HEAD(&vp->v_cache_dst,
654 					    ncp, nc_dst);
655 				else
656 					TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
657 				ncp->nc_vp = vp;
658 				CACHE_WUNLOCK();
659 				return;
660 			}
661 			dvp->v_cache_dd = NULL;
662 			SDT_PROBE(vfs, namecache, enter, done, dvp, "..", vp,
663 			    0, 0);
664 			CACHE_WUNLOCK();
665 			flag = NCF_ISDOTDOT;
666 		}
667 	}
668 
669 	hold = 0;
670 	zap = 0;
671 
672 	/*
673 	 * Calculate the hash key and set up as much of the new
674 	 * namecache entry as possible before acquiring the lock.
675 	 */
676 	ncp = cache_alloc(cnp->cn_namelen);
677 	ncp->nc_vp = vp;
678 	ncp->nc_dvp = dvp;
679 	ncp->nc_flag = flag;
680 	len = ncp->nc_nlen = cnp->cn_namelen;
681 	hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
682 	strlcpy(ncp->nc_name, cnp->cn_nameptr, len + 1);
683 	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
684 	CACHE_WLOCK();
685 
686 	/*
687 	 * See if this vnode or negative entry is already in the cache
688 	 * with this name.  This can happen with concurrent lookups of
689 	 * the same path name.
690 	 */
691 	ncpp = NCHHASH(hash);
692 	LIST_FOREACH(n2, ncpp, nc_hash) {
693 		if (n2->nc_dvp == dvp &&
694 		    n2->nc_nlen == cnp->cn_namelen &&
695 		    !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) {
696 			CACHE_WUNLOCK();
697 			cache_free(ncp);
698 			return;
699 		}
700 	}
701 
702 	if (flag == NCF_ISDOTDOT) {
703 		/*
704 		 * See if we are trying to add a ".." entry, but some other
705 		 * lookup has already populated the v_cache_dd pointer.
706 		 */
707 		if (dvp->v_cache_dd != NULL) {
708 			CACHE_WUNLOCK();
709 			cache_free(ncp);
710 			return;
711 		}
712 		KASSERT(vp == NULL || vp->v_type == VDIR,
713 		    ("wrong vnode type %p", vp));
714 		dvp->v_cache_dd = ncp;
715 	}
716 
717 	numcache++;
718 	if (!vp) {
719 		numneg++;
720 		if (cnp->cn_flags & ISWHITEOUT)
721 			ncp->nc_flag |= NCF_WHITE;
722 	} else if (vp->v_type == VDIR) {
723 		if (flag != NCF_ISDOTDOT) {
724 			if ((n2 = vp->v_cache_dd) != NULL &&
725 			    (n2->nc_flag & NCF_ISDOTDOT) != 0)
726 				cache_zap(n2);
727 			vp->v_cache_dd = ncp;
728 		}
729 	} else {
730 		vp->v_cache_dd = NULL;
731 	}
732 
733 	/*
734 	 * Insert the new namecache entry into the appropriate chain
735 	 * within the cache entries table.
736 	 */
737 	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
738 	if (flag != NCF_ISDOTDOT) {
739 		if (LIST_EMPTY(&dvp->v_cache_src)) {
740 			hold = 1;
741 			numcachehv++;
742 		}
743 		LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
744 	}
745 
746 	/*
747 	 * If the entry is "negative", we place it into the
748 	 * "negative" cache queue, otherwise, we place it into the
749 	 * destination vnode's cache entries queue.
750 	 */
751 	if (vp) {
752 		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
753 		SDT_PROBE(vfs, namecache, enter, done, dvp, ncp->nc_name, vp,
754 		    0, 0);
755 	} else {
756 		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
757 		SDT_PROBE(vfs, namecache, enter_negative, done, dvp,
758 		    ncp->nc_name, 0, 0, 0);
759 	}
760 	if (numneg * ncnegfactor > numcache) {
761 		ncp = TAILQ_FIRST(&ncneg);
762 		zap = 1;
763 	}
764 	if (hold)
765 		vhold(dvp);
766 	if (zap)
767 		cache_zap(ncp);
768 	CACHE_WUNLOCK();
769 }
770 
771 /*
772  * Name cache initialization, from vfs_init() when we are booting
773  */
774 static void
775 nchinit(void *dummy __unused)
776 {
777 
778 	TAILQ_INIT(&ncneg);
779 
780 	cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL, NULL,
781 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
782 	cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE, NULL,
783 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
784 
785 	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
786 }
787 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);
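
/*
 * hashinit() sizes the table to a power of two derived from the
 * desiredvnodes * 2 hint and returns the table size minus one in nchash,
 * which is why NCHHASH() can mask instead of taking a modulus and why the
 * sysctl handlers above treat nchash as the maximum index, not the count.
 */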
788 
789 
790 /*
791  * Invalidate all entries to a particular vnode.
792  */
793 void
794 cache_purge(vp)
795 	struct vnode *vp;
796 {
797 
798 	CTR1(KTR_VFS, "cache_purge(%p)", vp);
799 	SDT_PROBE(vfs, namecache, purge, done, vp, 0, 0, 0, 0);
800 	CACHE_WLOCK();
801 	while (!LIST_EMPTY(&vp->v_cache_src))
802 		cache_zap(LIST_FIRST(&vp->v_cache_src));
803 	while (!TAILQ_EMPTY(&vp->v_cache_dst))
804 		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
805 	if (vp->v_cache_dd != NULL) {
806 		KASSERT(vp->v_cache_dd->nc_flag & NCF_ISDOTDOT,
807 		   ("lost dotdot link"));
808 		cache_zap(vp->v_cache_dd);
809 	}
810 	KASSERT(vp->v_cache_dd == NULL, ("incomplete purge"));
811 	CACHE_WUNLOCK();
812 }
813 
814 /*
815  * Invalidate all negative entries for a particular directory vnode.
816  */
817 void
818 cache_purge_negative(vp)
819 	struct vnode *vp;
820 {
821 	struct namecache *cp, *ncp;
822 
823 	CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
824 	SDT_PROBE(vfs, namecache, purge_negative, done, vp, 0, 0, 0, 0);
825 	CACHE_WLOCK();
826 	LIST_FOREACH_SAFE(cp, &vp->v_cache_src, nc_src, ncp) {
827 		if (cp->nc_vp == NULL)
828 			cache_zap(cp);
829 	}
830 	CACHE_WUNLOCK();
831 }
832 
833 /*
834  * Flush all entries referencing a particular filesystem.
835  */
836 void
837 cache_purgevfs(mp)
838 	struct mount *mp;
839 {
840 	struct nchashhead *ncpp;
841 	struct namecache *ncp, *nnp;
842 
843 	/* Scan hash tables for applicable entries */
844 	SDT_PROBE(vfs, namecache, purgevfs, done, mp, 0, 0, 0, 0);
845 	CACHE_WLOCK();
846 	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
847 		LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) {
848 			if (ncp->nc_dvp->v_mount == mp)
849 				cache_zap(ncp);
850 		}
851 	}
852 	CACHE_WUNLOCK();
853 }
854 
855 /*
856  * Perform canonical checks and a cache lookup, and pass on to the
857  * filesystem through vop_cachedlookup only if needed.
858  */
859 
860 int
861 vfs_cache_lookup(ap)
862 	struct vop_lookup_args /* {
863 		struct vnode *a_dvp;
864 		struct vnode **a_vpp;
865 		struct componentname *a_cnp;
866 	} */ *ap;
867 {
868 	struct vnode *dvp;
869 	int error;
870 	struct vnode **vpp = ap->a_vpp;
871 	struct componentname *cnp = ap->a_cnp;
872 	struct ucred *cred = cnp->cn_cred;
873 	int flags = cnp->cn_flags;
874 	struct thread *td = cnp->cn_thread;
875 
876 	*vpp = NULL;
877 	dvp = ap->a_dvp;
878 
879 	if (dvp->v_type != VDIR)
880 		return (ENOTDIR);
881 
882 	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
883 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
884 		return (EROFS);
885 
886 	error = VOP_ACCESS(dvp, VEXEC, cred, td);
887 	if (error)
888 		return (error);
889 
890 	error = cache_lookup(dvp, vpp, cnp);
891 	if (error == 0)
892 		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
893 	if (error == -1)
894 		return (0);
895 	return (error);
896 }
897 
898 
899 #ifndef _SYS_SYSPROTO_H_
900 struct  __getcwd_args {
901 	u_char	*buf;
902 	u_int	buflen;
903 };
904 #endif
905 
906 /*
907  * XXX All of these sysctls would probably be more productive dead.
908  */
909 static int disablecwd;
910 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
911    "Disable the getcwd syscall");
912 
913 /* Implementation of the getcwd syscall. */
914 int
915 __getcwd(td, uap)
916 	struct thread *td;
917 	struct __getcwd_args *uap;
918 {
919 
920 	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen));
921 }
922 
923 int
924 kern___getcwd(struct thread *td, u_char *buf, enum uio_seg bufseg, u_int buflen)
925 {
926 	char *bp, *tmpbuf;
927 	struct filedesc *fdp;
928 	struct vnode *cdir, *rdir;
929 	int error, vfslocked;
930 
931 	if (disablecwd)
932 		return (ENODEV);
933 	if (buflen < 2)
934 		return (EINVAL);
935 	if (buflen > MAXPATHLEN)
936 		buflen = MAXPATHLEN;
937 
938 	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
939 	fdp = td->td_proc->p_fd;
940 	FILEDESC_SLOCK(fdp);
941 	cdir = fdp->fd_cdir;
942 	VREF(cdir);
943 	rdir = fdp->fd_rdir;
944 	VREF(rdir);
945 	FILEDESC_SUNLOCK(fdp);
946 	error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen);
947 	vfslocked = VFS_LOCK_GIANT(rdir->v_mount);
948 	vrele(rdir);
949 	VFS_UNLOCK_GIANT(vfslocked);
950 	vfslocked = VFS_LOCK_GIANT(cdir->v_mount);
951 	vrele(cdir);
952 	VFS_UNLOCK_GIANT(vfslocked);
953 
954 	if (!error) {
955 		if (bufseg == UIO_SYSSPACE)
956 			bcopy(bp, buf, strlen(bp) + 1);
957 		else
958 			error = copyout(bp, buf, strlen(bp) + 1);
959 #ifdef KTRACE
960 		if (KTRPOINT(curthread, KTR_NAMEI))
961 			ktrnamei(bp);
962 #endif
963 	}
964 	free(tmpbuf, M_TEMP);
965 	return (error);
966 }
967 
968 /*
969  * Thus begins the fullpath magic.
970  */
971 
972 #undef STATNODE
973 #define STATNODE(name)							\
974 	static u_int name;						\
975 	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")
976 
977 static int disablefullpath;
978 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
979 	"Disable the vn_fullpath function");
980 
981 /* These count for kern___getcwd(), too. */
982 STATNODE(numfullpathcalls);
983 STATNODE(numfullpathfail1);
984 STATNODE(numfullpathfail2);
985 STATNODE(numfullpathfail4);
986 STATNODE(numfullpathfound);
987 
988 /*
989  * Retrieve the full filesystem path that corresponds to a vnode from the
990  * name cache (if available).
991  */
992 int
993 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
994 {
995 	char *buf;
996 	struct filedesc *fdp;
997 	struct vnode *rdir;
998 	int error, vfslocked;
999 
1000 	if (disablefullpath)
1001 		return (ENODEV);
1002 	if (vn == NULL)
1003 		return (EINVAL);
1004 
1005 	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
1006 	fdp = td->td_proc->p_fd;
1007 	FILEDESC_SLOCK(fdp);
1008 	rdir = fdp->fd_rdir;
1009 	VREF(rdir);
1010 	FILEDESC_SUNLOCK(fdp);
1011 	error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN);
1012 	vfslocked = VFS_LOCK_GIANT(rdir->v_mount);
1013 	vrele(rdir);
1014 	VFS_UNLOCK_GIANT(vfslocked);
1015 
1016 	if (!error)
1017 		*freebuf = buf;
1018 	else
1019 		free(buf, M_TEMP);
1020 	return (error);
1021 }
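
/*
 * A caller of vn_fullpath() uses *retbuf as the path and, on success,
 * releases the backing storage via *freebuf, e.g. (names illustrative):
 *
 *	char *fullpath, *freepath;
 *
 *	if (vn_fullpath(td, vp, &fullpath, &freepath) == 0) {
 *		...
 *		free(freepath, M_TEMP);
 *	}
 */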
1022 
1023 /*
1024  * This function is similar to vn_fullpath, but it attempts to lookup the
1025  * pathname relative to the global root mount point.  This is required for the
1026  * auditing sub-system, as audited pathnames must be absolute, relative to the
1027  * global root mount point.
1028  */
1029 int
1030 vn_fullpath_global(struct thread *td, struct vnode *vn,
1031     char **retbuf, char **freebuf)
1032 {
1033 	char *buf;
1034 	int error;
1035 
1036 	if (disablefullpath)
1037 		return (ENODEV);
1038 	if (vn == NULL)
1039 		return (EINVAL);
1040 	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
1041 	error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
1042 	if (!error)
1043 		*freebuf = buf;
1044 	else
1045 		free(buf, M_TEMP);
1046 	return (error);
1047 }
1048 
1049 int
1050 vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)
1051 {
1052 	int error;
1053 
1054 	CACHE_RLOCK();
1055 	error = vn_vptocnp_locked(vp, cred, buf, buflen);
1056 	if (error == 0) {
1057 		/*
1058 		 * vn_vptocnp_locked() dropped the hold acquired by
1059 		 * VOP_VPTOCNP immediately after locking the
1060 		 * cache. Since we are going to drop the cache rlock,
1061 		 * re-hold the result.
1062 		 */
1063 		vhold(*vp);
1064 		CACHE_RUNLOCK();
1065 	}
1066 	return (error);
1067 }
1068 
1069 static int
1070 vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
1071     u_int *buflen)
1072 {
1073 	struct vnode *dvp;
1074 	struct namecache *ncp;
1075 	int error, vfslocked;
1076 
1077 	TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) {
1078 		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
1079 			break;
1080 	}
1081 	if (ncp != NULL) {
1082 		if (*buflen < ncp->nc_nlen) {
1083 			CACHE_RUNLOCK();
1084 			numfullpathfail4++;
1085 			error = ENOMEM;
1086 			SDT_PROBE(vfs, namecache, fullpath, return, error,
1087 			    vp, NULL, 0, 0);
1088 			return (error);
1089 		}
1090 		*buflen -= ncp->nc_nlen;
1091 		memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen);
1092 		SDT_PROBE(vfs, namecache, fullpath, hit, ncp->nc_dvp,
1093 		    ncp->nc_name, vp, 0, 0);
1094 		*vp = ncp->nc_dvp;
1095 		return (0);
1096 	}
1097 	SDT_PROBE(vfs, namecache, fullpath, miss, vp, 0, 0, 0, 0);
1098 
1099 	vhold(*vp);
1100 	CACHE_RUNLOCK();
1101 	vfslocked = VFS_LOCK_GIANT((*vp)->v_mount);
1102 	vn_lock(*vp, LK_SHARED | LK_RETRY);
1103 	error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
1104 	VOP_UNLOCK(*vp, 0);
1105 	vdrop(*vp);
1106 	VFS_UNLOCK_GIANT(vfslocked);
1107 	if (error) {
1108 		numfullpathfail2++;
1109 		SDT_PROBE(vfs, namecache, fullpath, return, error, vp,
1110 		    NULL, 0, 0);
1111 		return (error);
1112 	}
1113 
1114 	*vp = dvp;
1115 	CACHE_RLOCK();
1116 	if ((*vp)->v_iflag & VI_DOOMED) {
1117 		/* forced unmount */
1118 		CACHE_RUNLOCK();
1119 		vdrop(*vp);
1120 		error = ENOENT;
1121 		SDT_PROBE(vfs, namecache, fullpath, return, error, vp,
1122 		    NULL, 0, 0);
1123 		return (error);
1124 	}
1125 	vdrop(*vp);
1126 
1127 	return (0);
1128 }
1129 
1130 /*
1131  * The magic behind kern___getcwd() and vn_fullpath().
1132  */
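/*
 * The path is assembled right to left: starting from vp, each iteration
 * asks vn_vptocnp_locked() for the component name and the parent vnode,
 * prepends the name and a '/' at the tail of buf, and crosses mount points
 * by hopping from a VV_ROOT vnode to mnt_vnodecovered.  For example,
 * resolving the vnode of "sys" under "/usr/src" writes "sys", '/', "src",
 * '/', "usr", '/' into the end of the buffer, leaving *retbuf pointing at
 * "/usr/src/sys".
 */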
1133 static int
1134 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
1135     char *buf, char **retbuf, u_int buflen)
1136 {
1137 	int error, slash_prefixed;
1138 #ifdef KDTRACE_HOOKS
1139 	struct vnode *startvp = vp;
1140 #endif
1141 
1142 	buflen--;
1143 	buf[buflen] = '\0';
1144 	error = 0;
1145 	slash_prefixed = 0;
1146 
1147 	SDT_PROBE(vfs, namecache, fullpath, entry, vp, 0, 0, 0, 0);
1148 	numfullpathcalls++;
1149 	CACHE_RLOCK();
1150 	if (vp->v_type != VDIR) {
1151 		error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen);
1152 		if (error)
1153 			return (error);
1154 		if (buflen == 0) {
1155 			CACHE_RUNLOCK();
1156 			return (ENOMEM);
1157 		}
1158 		buf[--buflen] = '/';
1159 		slash_prefixed = 1;
1160 	}
1161 	while (vp != rdir && vp != rootvnode) {
1162 		if (vp->v_vflag & VV_ROOT) {
1163 			if (vp->v_iflag & VI_DOOMED) {	/* forced unmount */
1164 				CACHE_RUNLOCK();
1165 				error = ENOENT;
1166 				SDT_PROBE(vfs, namecache, fullpath, return,
1167 				    error, vp, NULL, 0, 0);
1168 				break;
1169 			}
1170 			vp = vp->v_mount->mnt_vnodecovered;
1171 			continue;
1172 		}
1173 		if (vp->v_type != VDIR) {
1174 			CACHE_RUNLOCK();
1175 			numfullpathfail1++;
1176 			error = ENOTDIR;
1177 			SDT_PROBE(vfs, namecache, fullpath, return,
1178 			    error, vp, NULL, 0, 0);
1179 			break;
1180 		}
1181 		error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen);
1182 		if (error)
1183 			break;
1184 		if (buflen == 0) {
1185 			CACHE_RUNLOCK();
1186 			error = ENOMEM;
1187 			SDT_PROBE(vfs, namecache, fullpath, return, error,
1188 			    startvp, NULL, 0, 0);
1189 			break;
1190 		}
1191 		buf[--buflen] = '/';
1192 		slash_prefixed = 1;
1193 	}
1194 	if (error)
1195 		return (error);
1196 	if (!slash_prefixed) {
1197 		if (buflen == 0) {
1198 			CACHE_RUNLOCK();
1199 			numfullpathfail4++;
1200 			SDT_PROBE(vfs, namecache, fullpath, return, ENOMEM,
1201 			    startvp, NULL, 0, 0);
1202 			return (ENOMEM);
1203 		}
1204 		buf[--buflen] = '/';
1205 	}
1206 	numfullpathfound++;
1207 	CACHE_RUNLOCK();
1208 
1209 	SDT_PROBE(vfs, namecache, fullpath, return, 0, startvp, buf + buflen,
1210 	    0, 0);
1211 	*retbuf = buf + buflen;
1212 	return (0);
1213 }
1214 
1215 int
1216 vn_commname(struct vnode *vp, char *buf, u_int buflen)
1217 {
1218 	struct namecache *ncp;
1219 	int l;
1220 
1221 	CACHE_RLOCK();
1222 	TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst)
1223 		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
1224 			break;
1225 	if (ncp == NULL) {
1226 		CACHE_RUNLOCK();
1227 		return (ENOENT);
1228 	}
1229 	l = min(ncp->nc_nlen, buflen - 1);
1230 	memcpy(buf, ncp->nc_name, l);
1231 	CACHE_RUNLOCK();
1232 	buf[l] = '\0';
1233 	return (0);
1234 }
1235