xref: /freebsd/sys/kern/vfs_cache.c (revision 1670a1c2a47d10ecccd001970b859caf93cd3b6e)
1 /*-
2  * Copyright (c) 1989, 1993, 1995
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Poul-Henning Kamp of the FreeBSD Project.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include "opt_kdtrace.h"
39 #include "opt_ktrace.h"
40 
41 #include <sys/param.h>
42 #include <sys/filedesc.h>
43 #include <sys/fnv_hash.h>
44 #include <sys/kernel.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/mount.h>
48 #include <sys/namei.h>
49 #include <sys/proc.h>
50 #include <sys/rwlock.h>
51 #include <sys/sdt.h>
52 #include <sys/syscallsubr.h>
53 #include <sys/sysctl.h>
54 #include <sys/sysproto.h>
55 #include <sys/systm.h>
56 #include <sys/vnode.h>
57 #ifdef KTRACE
58 #include <sys/ktrace.h>
59 #endif
60 
61 #include <vm/uma.h>
62 
63 SDT_PROVIDER_DECLARE(vfs);
64 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *",
65     "struct vnode *");
66 SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *",
67     "char *");
68 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *");
69 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *",
70     "char *", "struct vnode *");
71 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *");
72 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int", "struct vnode *",
73     "char *");
74 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *",
75     "struct vnode *");
76 SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit_negative, "struct vnode *",
77     "char *");
78 SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *",
79     "char *");
80 SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *");
81 SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *");
82 SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *");
83 SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *",
84     "struct vnode *");
85 SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *",
86     "char *");
87 
88 /*
89  * This structure describes the elements in the cache of recent
90  * names looked up by namei.
91  */
92 
93 struct	namecache {
94 	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
95 	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
96 	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
97 	struct	vnode *nc_dvp;		/* vnode of parent of name */
98 	struct	vnode *nc_vp;		/* vnode the name refers to */
99 	u_char	nc_flag;		/* flag bits */
100 	u_char	nc_nlen;		/* length of name */
101 	char	nc_name[0];		/* segment name + nul */
102 };
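
/*
 * Illustrative note: nc_name is a zero-length array used as a flexible
 * array member (a gcc extension), so each entry is allocated with enough
 * trailing space for the component name and its terminating nul.  Caching
 * the name "passwd" (6 characters), for example, conceptually needs
 *
 *	sizeof(struct namecache) + 6 + 1
 *
 * bytes; the UMA zones defined below round such requests up to one of two
 * fixed sizes.
 */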
103 
104 /*
105  * Name caching works as follows:
106  *
107  * Names found by directory scans are retained in a cache
108  * for future reference.  The cache is managed LRU, so frequently
109  * used names will hang around.  The cache is indexed by a hash value
110  * obtained from (dvp, name), where dvp refers to the directory
111  * containing the name.
112  *
113  * If it is a "negative" entry (i.e. for a name that is known NOT to
114  * exist), the vnode pointer will be NULL.
115  *
116  * Upon reaching the last segment of a path, if the reference
117  * is for DELETE, or NOCACHE is set (rewrite), and the
118  * name is located in the cache, it will be dropped.
119  */
120 
121 /*
122  * Structures associated with name caching.
123  */
124 #define NCHHASH(hash) \
125 	(&nchashtbl[(hash) & nchash])
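
/*
 * Illustrative sketch of how a chain is selected: both cache_lookup() and
 * cache_enter() hash the component name first and then fold in the address
 * of the directory vnode, so a given (directory, name) pair always maps to
 * the same chain:
 *
 *	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
 *	ncpp = NCHHASH(hash);		i.e. &nchashtbl[hash & nchash]
 */
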
126 static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
127 static TAILQ_HEAD(, namecache) ncneg;	/* Negative cache entries, LRU order */
128 static u_long	nchash;			/* size of hash table */
129 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
130 static u_long	ncnegfactor = 16;	/* ratio of negative entries */
131 SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
132 static u_long	numneg;			/* number of negative cache entries */
133 SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
134 static u_long	numcache;		/* number of cache entries allocated */
135 SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
136 static u_long	numcachehv;		/* number of cache entries with vnodes held */
137 SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, "");
138 #if 0
139 static u_long	numcachepl;		/* number of cache purge for leaf entries */
140 SYSCTL_ULONG(_debug, OID_AUTO, numcachepl, CTLFLAG_RD, &numcachepl, 0, "");
141 #endif
142 struct	nchstats nchstats;		/* cache effectiveness statistics */
143 
144 static struct rwlock cache_lock;
145 RW_SYSINIT(vfscache, &cache_lock, "Name Cache");
146 
147 #define	CACHE_UPGRADE_LOCK()	rw_try_upgrade(&cache_lock)
148 #define	CACHE_RLOCK()		rw_rlock(&cache_lock)
149 #define	CACHE_RUNLOCK()		rw_runlock(&cache_lock)
150 #define	CACHE_WLOCK()		rw_wlock(&cache_lock)
151 #define	CACHE_WUNLOCK()		rw_wunlock(&cache_lock)
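
/*
 * Illustrative note on the locking pattern: lookups run under the read
 * lock and escalate only when they must modify the cache.  The usual
 * sequence in cache_lookup() is roughly
 *
 *	CACHE_RLOCK();
 *	...
 *	if (!wlocked && !CACHE_UPGRADE_LOCK())
 *		goto wlock;		drop the rlock, take the wlock, retry
 *
 * which keeps the common read-only case cheap.
 */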
152 
153 /*
154  * UMA zones for the VFS cache.
155  *
156  * The small cache is used for entries with short names, which are the
157  * most common.  The large cache is used for entries which are too big to
158  * fit in the small cache.
159  */
160 static uma_zone_t cache_zone_small;
161 static uma_zone_t cache_zone_large;
162 
163 #define	CACHE_PATH_CUTOFF	35
164 #define	CACHE_ZONE_SMALL	(sizeof(struct namecache) + CACHE_PATH_CUTOFF \
165 				    + 1)
166 #define	CACHE_ZONE_LARGE	(sizeof(struct namecache) + NAME_MAX + 1)
167 
168 #define cache_alloc(len)	uma_zalloc(((len) <= CACHE_PATH_CUTOFF) ? \
169 	cache_zone_small : cache_zone_large, M_WAITOK)
170 #define cache_free(ncp)		do { \
171 	if (ncp != NULL) \
172 		uma_zfree(((ncp)->nc_nlen <= CACHE_PATH_CUTOFF) ? \
173 		    cache_zone_small : cache_zone_large, (ncp)); \
174 } while (0)
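
/*
 * Illustrative example of zone selection: a name of up to
 * CACHE_PATH_CUTOFF (35) characters is served from the small zone,
 * anything longer from the large zone:
 *
 *	cache_alloc(6);		item of CACHE_ZONE_SMALL bytes
 *	cache_alloc(100);	item of CACHE_ZONE_LARGE bytes
 *
 * Both zones hand out fixed-size items, so short names waste a little
 * space in exchange for cheap allocation from the per-CPU UMA caches.
 */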
175 
176 static int	doingcache = 1;		/* 1 => enable the cache */
177 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");
178 
179 /* Export size information to userland */
180 SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, 0,
181 	sizeof(struct namecache), "");
182 
183 /*
184  * The new name cache statistics
185  */
186 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
187 #define STATNODE(mode, name, var) \
188 	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
189 STATNODE(CTLFLAG_RD, numneg, &numneg);
190 STATNODE(CTLFLAG_RD, numcache, &numcache);
191 static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
192 static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
193 static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
194 static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
195 static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
196 static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
197 static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
198 static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
199 static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
200 static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);
201 static u_long numupgrades; STATNODE(CTLFLAG_RD, numupgrades, &numupgrades);
202 
203 SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD | CTLFLAG_MPSAFE,
204 	&nchstats, sizeof(nchstats), "LU", "VFS cache effectiveness statistics");
205 
206 
207 
208 static void cache_zap(struct namecache *ncp);
209 static int vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
210     u_int *buflen);
211 static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
212     char *buf, char **retbuf, u_int buflen);
213 
214 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
215 
216 /*
217  * Flags in namecache.nc_flag
218  */
219 #define NCF_WHITE	0x01
220 #define NCF_ISDOTDOT	0x02
221 
222 #ifdef DIAGNOSTIC
223 /*
224  * Grab an atomic snapshot of the name cache hash chain lengths
225  */
226 SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, "hash table stats");
227 
228 static int
229 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
230 {
231 	int error;
232 	struct nchashhead *ncpp;
233 	struct namecache *ncp;
234 	int n_nchash;
235 	int count;
236 
237 	n_nchash = nchash + 1;	/* nchash is max index, not count */
238 	if (!req->oldptr)
239 		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
240 
241 	/* Scan hash tables for applicable entries */
242 	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
243 		CACHE_RLOCK();
244 		count = 0;
245 		LIST_FOREACH(ncp, ncpp, nc_hash) {
246 			count++;
247 		}
248 		CACHE_RUNLOCK();
249 		error = SYSCTL_OUT(req, &count, sizeof(count));
250 		if (error)
251 			return (error);
252 	}
253 	return (0);
254 }
255 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
256 	CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
257 	"nchash chain lengths");
258 
259 static int
260 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
261 {
262 	int error;
263 	struct nchashhead *ncpp;
264 	struct namecache *ncp;
265 	int n_nchash;
266 	int count, maxlength, used, pct;
267 
268 	if (!req->oldptr)
269 		return SYSCTL_OUT(req, 0, 4 * sizeof(int));
270 
271 	n_nchash = nchash + 1;	/* nchash is max index, not count */
272 	used = 0;
273 	maxlength = 0;
274 
275 	/* Scan hash tables for applicable entries */
276 	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
277 		count = 0;
278 		CACHE_RLOCK();
279 		LIST_FOREACH(ncp, ncpp, nc_hash) {
280 			count++;
281 		}
282 		CACHE_RUNLOCK();
283 		if (count)
284 			used++;
285 		if (maxlength < count)
286 			maxlength = count;
287 	}
288 	n_nchash = nchash + 1;
289 	pct = (used * 100 * 100) / n_nchash;
290 	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
291 	if (error)
292 		return (error);
293 	error = SYSCTL_OUT(req, &used, sizeof(used));
294 	if (error)
295 		return (error);
296 	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
297 	if (error)
298 		return (error);
299 	error = SYSCTL_OUT(req, &pct, sizeof(pct));
300 	if (error)
301 		return (error);
302 	return (0);
303 }
304 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
305 	CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
306 	"nchash chain lengths");
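
/*
 * Illustrative example: the four integers exported above are the table
 * size, the number of chains in use, the longest chain, and the chain
 * utilization in hundredths of a percent.  With a 256-chain table of
 * which 64 chains are non-empty,
 *
 *	pct = (64 * 100 * 100) / 256 = 2500	i.e. 25.00% of chains used
 */
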
307 #endif
308 
309 /*
310  * cache_zap():
311  *
312  *   Removes a namecache entry from the cache, whether it contains an
313  *   actual pointer to a vnode or is just a negative cache entry.
314  */
315 static void
316 cache_zap(ncp)
317 	struct namecache *ncp;
318 {
319 	struct vnode *vp;
320 
321 	rw_assert(&cache_lock, RA_WLOCKED);
322 	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp);
323 #ifdef KDTRACE_HOOKS
324 	if (ncp->nc_vp != NULL) {
325 		SDT_PROBE(vfs, namecache, zap, done, ncp->nc_dvp,
326 		    ncp->nc_name, ncp->nc_vp, 0, 0);
327 	} else {
328 		SDT_PROBE(vfs, namecache, zap_negative, done, ncp->nc_dvp,
329 		    ncp->nc_name, 0, 0, 0);
330 	}
331 #endif
332 	vp = NULL;
333 	LIST_REMOVE(ncp, nc_hash);
334 	if (ncp->nc_flag & NCF_ISDOTDOT) {
335 		if (ncp == ncp->nc_dvp->v_cache_dd)
336 			ncp->nc_dvp->v_cache_dd = NULL;
337 	} else {
338 		LIST_REMOVE(ncp, nc_src);
339 		if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
340 			vp = ncp->nc_dvp;
341 			numcachehv--;
342 		}
343 	}
344 	if (ncp->nc_vp) {
345 		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
346 		if (ncp == ncp->nc_vp->v_cache_dd)
347 			ncp->nc_vp->v_cache_dd = NULL;
348 	} else {
349 		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
350 		numneg--;
351 	}
352 	numcache--;
353 	cache_free(ncp);
354 	if (vp)
355 		vdrop(vp);
356 }
357 
358 /*
359  * Lookup an entry in the cache
360  *
361  * Lookup is called with dvp pointing to the directory to search and
362  * cnp pointing to the name of the entry being sought. If the lookup
363  * succeeds, the vnode is returned in *vpp, and a status of -1 is
364  * returned. If the lookup determines that the name does not exist
365  * (negative caching), a status of ENOENT is returned. If the lookup
366  * fails, a status of zero is returned.  If the directory vnode is
367  * recycled out from under us due to a forced unmount, a status of
368  * ENOENT is returned.
369  *
370  * vpp is locked and ref'd on return.  If we're looking up DOTDOT, dvp is
371  * unlocked.  If we're looking up ".", an extra ref is taken, but the lock
372  * is not recursively acquired.
373  */
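
/*
 * Illustrative sketch of how a caller dispatches on the three return
 * values described above, much as vfs_cache_lookup() below does:
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	if (error == 0)
 *		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));	cache miss
 *	if (error == -1)
 *		return (0);		positive hit, *vpp ref'd and locked
 *	return (error);			ENOENT from a negative entry
 */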
374 
375 int
376 cache_lookup(dvp, vpp, cnp)
377 	struct vnode *dvp;
378 	struct vnode **vpp;
379 	struct componentname *cnp;
380 {
381 	struct namecache *ncp;
382 	uint32_t hash;
383 	int error, ltype, wlocked;
384 
385 	if (!doingcache) {
386 		cnp->cn_flags &= ~MAKEENTRY;
387 		return (0);
388 	}
389 retry:
390 	CACHE_RLOCK();
391 	wlocked = 0;
392 	numcalls++;
393 	error = 0;
394 
395 retry_wlocked:
396 	if (cnp->cn_nameptr[0] == '.') {
397 		if (cnp->cn_namelen == 1) {
398 			*vpp = dvp;
399 			CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
400 			    dvp, cnp->cn_nameptr);
401 			dothits++;
402 			SDT_PROBE(vfs, namecache, lookup, hit, dvp, ".",
403 			    *vpp, 0, 0);
404 			goto success;
405 		}
406 		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
407 			dotdothits++;
408 			if (dvp->v_cache_dd == NULL) {
409 				SDT_PROBE(vfs, namecache, lookup, miss, dvp,
410 				    "..", NULL, 0, 0);
411 				goto unlock;
412 			}
413 			if ((cnp->cn_flags & MAKEENTRY) == 0) {
414 				if (!wlocked && !CACHE_UPGRADE_LOCK())
415 					goto wlock;
416 				if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT)
417 					cache_zap(dvp->v_cache_dd);
418 				dvp->v_cache_dd = NULL;
419 				CACHE_WUNLOCK();
420 				return (0);
421 			}
422 			if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT)
423 				*vpp = dvp->v_cache_dd->nc_vp;
424 			else
425 				*vpp = dvp->v_cache_dd->nc_dvp;
426 			/* Return failure if negative entry was found. */
427 			if (*vpp == NULL) {
428 				ncp = dvp->v_cache_dd;
429 				goto negative_success;
430 			}
431 			CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
432 			    dvp, cnp->cn_nameptr, *vpp);
433 			SDT_PROBE(vfs, namecache, lookup, hit, dvp, "..",
434 			    *vpp, 0, 0);
435 			goto success;
436 		}
437 	}
438 
439 	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
440 	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
441 	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
442 		numchecks++;
443 		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
444 		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
445 			break;
446 	}
447 
448 	/* We failed to find an entry */
449 	if (ncp == NULL) {
450 		SDT_PROBE(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
451 		    NULL, 0, 0);
452 		if ((cnp->cn_flags & MAKEENTRY) == 0) {
453 			nummisszap++;
454 		} else {
455 			nummiss++;
456 		}
457 		nchstats.ncs_miss++;
458 		goto unlock;
459 	}
460 
461 	/* We don't want to have an entry, so dump it */
462 	if ((cnp->cn_flags & MAKEENTRY) == 0) {
463 		numposzaps++;
464 		nchstats.ncs_badhits++;
465 		if (!wlocked && !CACHE_UPGRADE_LOCK())
466 			goto wlock;
467 		cache_zap(ncp);
468 		CACHE_WUNLOCK();
469 		return (0);
470 	}
471 
472 	/* We found a "positive" match, return the vnode */
473 	if (ncp->nc_vp) {
474 		numposhits++;
475 		nchstats.ncs_goodhits++;
476 		*vpp = ncp->nc_vp;
477 		CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
478 		    dvp, cnp->cn_nameptr, *vpp, ncp);
479 		SDT_PROBE(vfs, namecache, lookup, hit, dvp, ncp->nc_name,
480 		    *vpp, 0, 0);
481 		goto success;
482 	}
483 
484 negative_success:
485 	/* We found a negative match, and want to create it, so purge */
486 	if (cnp->cn_nameiop == CREATE) {
487 		numnegzaps++;
488 		nchstats.ncs_badhits++;
489 		if (!wlocked && !CACHE_UPGRADE_LOCK())
490 			goto wlock;
491 		cache_zap(ncp);
492 		CACHE_WUNLOCK();
493 		return (0);
494 	}
495 
496 	if (!wlocked && !CACHE_UPGRADE_LOCK())
497 		goto wlock;
498 	numneghits++;
499 	/*
500 	 * We found a "negative" match, so we shift it to the end of
501 	 * the "negative" cache entries queue to satisfy LRU.  Also,
502 	 * check to see if the entry is a whiteout; if so, indicate
503 	 * this in the componentname flags.
504 	 */
505 	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
506 	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
507 	nchstats.ncs_neghits++;
508 	if (ncp->nc_flag & NCF_WHITE)
509 		cnp->cn_flags |= ISWHITEOUT;
510 	SDT_PROBE(vfs, namecache, lookup, hit_negative, dvp, ncp->nc_name,
511 	    0, 0, 0);
512 	CACHE_WUNLOCK();
513 	return (ENOENT);
514 
515 wlock:
516 	/*
517 	 * We need to update the cache after our lookup, so upgrade to
518 	 * a write lock and retry the operation.
519 	 */
520 	CACHE_RUNLOCK();
521 	CACHE_WLOCK();
522 	numupgrades++;
523 	wlocked = 1;
524 	goto retry_wlocked;
525 
526 success:
527 	/*
528 	 * On success we return a locked and ref'd vnode as per the lookup
529 	 * protocol.
530 	 */
531 	if (dvp == *vpp) {   /* lookup on "." */
532 		VREF(*vpp);
533 		if (wlocked)
534 			CACHE_WUNLOCK();
535 		else
536 			CACHE_RUNLOCK();
537 		/*
538 		 * When we look up "." we can still be asked to lock it
539 		 * differently...
540 		 */
541 		ltype = cnp->cn_lkflags & LK_TYPE_MASK;
542 		if (ltype != VOP_ISLOCKED(*vpp)) {
543 			if (ltype == LK_EXCLUSIVE) {
544 				vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
545 				if ((*vpp)->v_iflag & VI_DOOMED) {
546 					/* forced unmount */
547 					vrele(*vpp);
548 					*vpp = NULL;
549 					return (ENOENT);
550 				}
551 			} else
552 				vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
553 		}
554 		return (-1);
555 	}
556 	ltype = 0;	/* silence gcc warning */
557 	if (cnp->cn_flags & ISDOTDOT) {
558 		ltype = VOP_ISLOCKED(dvp);
559 		VOP_UNLOCK(dvp, 0);
560 	}
561 	VI_LOCK(*vpp);
562 	if (wlocked)
563 		CACHE_WUNLOCK();
564 	else
565 		CACHE_RUNLOCK();
566 	error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
567 	if (cnp->cn_flags & ISDOTDOT) {
568 		vn_lock(dvp, ltype | LK_RETRY);
569 		if (dvp->v_iflag & VI_DOOMED) {
570 			if (error == 0)
571 				vput(*vpp);
572 			*vpp = NULL;
573 			return (ENOENT);
574 		}
575 	}
576 	if (error) {
577 		*vpp = NULL;
578 		goto retry;
579 	}
580 	if ((cnp->cn_flags & ISLASTCN) &&
581 	    (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
582 		ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
583 	}
584 	return (-1);
585 
586 unlock:
587 	if (wlocked)
588 		CACHE_WUNLOCK();
589 	else
590 		CACHE_RUNLOCK();
591 	return (0);
592 }
593 
594 /*
595  * Add an entry to the cache.
596  */
597 void
598 cache_enter(dvp, vp, cnp)
599 	struct vnode *dvp;
600 	struct vnode *vp;
601 	struct componentname *cnp;
602 {
603 	struct namecache *ncp, *n2;
604 	struct nchashhead *ncpp;
605 	uint32_t hash;
606 	int flag;
607 	int hold;
608 	int zap;
609 	int len;
610 
611 	CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
612 	VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
613 	    ("cache_enter: Adding a doomed vnode"));
614 	VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
615 	    ("cache_enter: Doomed vnode used as src"));
616 
617 	if (!doingcache)
618 		return;
619 
620 	/*
621 	 * Avoid a blowout in the number of namecache entries.
622 	 */
623 	if (numcache >= desiredvnodes * 2)
624 		return;
625 
626 	flag = 0;
627 	if (cnp->cn_nameptr[0] == '.') {
628 		if (cnp->cn_namelen == 1)
629 			return;
630 		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
631 			CACHE_WLOCK();
632 			/*
633 			 * If a dotdot entry already exists, just retarget it
634 			 * to the new parent vnode; otherwise continue with
635 			 * new namecache entry allocation.
636 			 */
637 			if ((ncp = dvp->v_cache_dd) != NULL &&
638 			    ncp->nc_flag & NCF_ISDOTDOT) {
639 				KASSERT(ncp->nc_dvp == dvp,
640 				    ("wrong isdotdot parent"));
641 				if (ncp->nc_vp != NULL)
642 					TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst,
643 					    ncp, nc_dst);
644 				else
645 					TAILQ_REMOVE(&ncneg, ncp, nc_dst);
646 				if (vp != NULL)
647 					TAILQ_INSERT_HEAD(&vp->v_cache_dst,
648 					    ncp, nc_dst);
649 				else
650 					TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
651 				ncp->nc_vp = vp;
652 				CACHE_WUNLOCK();
653 				return;
654 			}
655 			dvp->v_cache_dd = NULL;
656 			SDT_PROBE(vfs, namecache, enter, done, dvp, "..", vp,
657 			    0, 0);
658 			CACHE_WUNLOCK();
659 			flag = NCF_ISDOTDOT;
660 		}
661 	}
662 
663 	hold = 0;
664 	zap = 0;
665 
666 	/*
667 	 * Calculate the hash key and set up as much of the new
668 	 * namecache entry as possible before acquiring the lock.
669 	 */
670 	ncp = cache_alloc(cnp->cn_namelen);
671 	ncp->nc_vp = vp;
672 	ncp->nc_dvp = dvp;
673 	ncp->nc_flag = flag;
674 	len = ncp->nc_nlen = cnp->cn_namelen;
675 	hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
676 	strlcpy(ncp->nc_name, cnp->cn_nameptr, len + 1);
677 	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
678 	CACHE_WLOCK();
679 
680 	/*
681 	 * See if this vnode or negative entry is already in the cache
682 	 * with this name.  This can happen with concurrent lookups of
683 	 * the same path name.
684 	 */
685 	ncpp = NCHHASH(hash);
686 	LIST_FOREACH(n2, ncpp, nc_hash) {
687 		if (n2->nc_dvp == dvp &&
688 		    n2->nc_nlen == cnp->cn_namelen &&
689 		    !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) {
690 			CACHE_WUNLOCK();
691 			cache_free(ncp);
692 			return;
693 		}
694 	}
695 
696 	if (flag == NCF_ISDOTDOT) {
697 		/*
698 		 * See if we are trying to add a ".." entry, but some other
699 		 * lookup has already populated the v_cache_dd pointer.
700 		 */
701 		if (dvp->v_cache_dd != NULL) {
702 			CACHE_WUNLOCK();
703 			cache_free(ncp);
704 			return;
705 		}
706 		KASSERT(vp == NULL || vp->v_type == VDIR,
707 		    ("wrong vnode type %p", vp));
708 		dvp->v_cache_dd = ncp;
709 	}
710 
711 	numcache++;
712 	if (!vp) {
713 		numneg++;
714 		if (cnp->cn_flags & ISWHITEOUT)
715 			ncp->nc_flag |= NCF_WHITE;
716 	} else if (vp->v_type == VDIR) {
717 		if (flag != NCF_ISDOTDOT) {
718 			if ((n2 = vp->v_cache_dd) != NULL &&
719 			    (n2->nc_flag & NCF_ISDOTDOT) != 0)
720 				cache_zap(n2);
721 			vp->v_cache_dd = ncp;
722 		}
723 	} else {
724 		vp->v_cache_dd = NULL;
725 	}
726 
727 	/*
728 	 * Insert the new namecache entry into the appropriate chain
729 	 * within the cache entries table.
730 	 */
731 	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
732 	if (flag != NCF_ISDOTDOT) {
733 		if (LIST_EMPTY(&dvp->v_cache_src)) {
734 			hold = 1;
735 			numcachehv++;
736 		}
737 		LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
738 	}
739 
740 	/*
741 	 * If the entry is "negative", we place it into the
742 	 * "negative" cache queue; otherwise, we place it into the
743 	 * destination vnode's cache entries queue.
744 	 */
745 	if (vp) {
746 		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
747 		SDT_PROBE(vfs, namecache, enter, done, dvp, ncp->nc_name, vp,
748 		    0, 0);
749 	} else {
750 		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
751 		SDT_PROBE(vfs, namecache, enter_negative, done, dvp,
752 		    ncp->nc_name, 0, 0, 0);
753 	}
754 	if (numneg * ncnegfactor > numcache) {
755 		ncp = TAILQ_FIRST(&ncneg);
756 		zap = 1;
757 	}
758 	if (hold)
759 		vhold(dvp);
760 	if (zap)
761 		cache_zap(ncp);
762 	CACHE_WUNLOCK();
763 }
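
/*
 * Illustrative sketch of typical use by a filesystem's own lookup routine
 * (roughly what e.g. UFS does), honouring the MAKEENTRY hint set up by
 * namei:
 *
 *	if (cnp->cn_flags & MAKEENTRY)
 *		cache_enter(dvp, vp, cnp);	positive entry
 *
 * and, when the component was not found and the operation is not CREATE,
 *
 *	if ((cnp->cn_flags & MAKEENTRY) && cnp->cn_nameiop != CREATE)
 *		cache_enter(dvp, NULL, cnp);	negative entry
 */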
764 
765 /*
766  * Name cache initialization, from vfs_init() when we are booting
767  */
768 static void
769 nchinit(void *dummy __unused)
770 {
771 
772 	TAILQ_INIT(&ncneg);
773 
774 	cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL, NULL,
775 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
776 	cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE, NULL,
777 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
778 
779 	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
780 }
781 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);
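
/*
 * Illustrative note: hashinit() rounds the requested size down to a power
 * of two and stores size - 1 in nchash, which is why NCHHASH() can pick a
 * chain with a simple mask.  For example, with desiredvnodes * 2 == 1000
 * the table gets 512 chains and nchash == 511 (0x1ff).
 */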
782 
783 
784 /*
785  * Invalidate all entries to a particular vnode.
786  */
787 void
788 cache_purge(vp)
789 	struct vnode *vp;
790 {
791 
792 	CTR1(KTR_VFS, "cache_purge(%p)", vp);
793 	SDT_PROBE(vfs, namecache, purge, done, vp, 0, 0, 0, 0);
794 	CACHE_WLOCK();
795 	while (!LIST_EMPTY(&vp->v_cache_src))
796 		cache_zap(LIST_FIRST(&vp->v_cache_src));
797 	while (!TAILQ_EMPTY(&vp->v_cache_dst))
798 		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
799 	if (vp->v_cache_dd != NULL) {
800 		KASSERT(vp->v_cache_dd->nc_flag & NCF_ISDOTDOT,
801 		   ("lost dotdot link"));
802 		cache_zap(vp->v_cache_dd);
803 	}
804 	KASSERT(vp->v_cache_dd == NULL, ("incomplete purge"));
805 	CACHE_WUNLOCK();
806 }
807 
808 /*
809  * Invalidate all negative entries for a particular directory vnode.
810  */
811 void
812 cache_purge_negative(vp)
813 	struct vnode *vp;
814 {
815 	struct namecache *cp, *ncp;
816 
817 	CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
818 	SDT_PROBE(vfs, namecache, purge_negative, done, vp, 0, 0, 0, 0);
819 	CACHE_WLOCK();
820 	LIST_FOREACH_SAFE(cp, &vp->v_cache_src, nc_src, ncp) {
821 		if (cp->nc_vp == NULL)
822 			cache_zap(cp);
823 	}
824 	CACHE_WUNLOCK();
825 }
826 
827 /*
828  * Flush all entries referencing a particular filesystem.
829  */
830 void
831 cache_purgevfs(mp)
832 	struct mount *mp;
833 {
834 	struct nchashhead *ncpp;
835 	struct namecache *ncp, *nnp;
836 
837 	/* Scan hash tables for applicable entries */
838 	SDT_PROBE(vfs, namecache, purgevfs, done, mp, 0, 0, 0, 0);
839 	CACHE_WLOCK();
840 	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
841 		LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) {
842 			if (ncp->nc_dvp->v_mount == mp)
843 				cache_zap(ncp);
844 		}
845 	}
846 	CACHE_WUNLOCK();
847 }
848 
849 /*
850  * Perform canonical checks and cache lookup, and pass on to the
851  * filesystem through VOP_CACHEDLOOKUP() only if needed.
852  */
853 
854 int
855 vfs_cache_lookup(ap)
856 	struct vop_lookup_args /* {
857 		struct vnode *a_dvp;
858 		struct vnode **a_vpp;
859 		struct componentname *a_cnp;
860 	} */ *ap;
861 {
862 	struct vnode *dvp;
863 	int error;
864 	struct vnode **vpp = ap->a_vpp;
865 	struct componentname *cnp = ap->a_cnp;
866 	struct ucred *cred = cnp->cn_cred;
867 	int flags = cnp->cn_flags;
868 	struct thread *td = cnp->cn_thread;
869 
870 	*vpp = NULL;
871 	dvp = ap->a_dvp;
872 
873 	if (dvp->v_type != VDIR)
874 		return (ENOTDIR);
875 
876 	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
877 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
878 		return (EROFS);
879 
880 	error = VOP_ACCESS(dvp, VEXEC, cred, td);
881 	if (error)
882 		return (error);
883 
884 	error = cache_lookup(dvp, vpp, cnp);
885 	if (error == 0)
886 		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
887 	if (error == -1)
888 		return (0);
889 	return (error);
890 }
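
/*
 * Illustrative sketch of how a filesystem opts into this fast path: point
 * the vop_lookup slot here and provide the real directory scan as
 * vop_cachedlookup (xxx_lookup is a placeholder name):
 *
 *	struct vop_vector xxx_vnodeops = {
 *		...
 *		.vop_lookup =		vfs_cache_lookup,
 *		.vop_cachedlookup =	xxx_lookup,
 *		...
 *	};
 */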
891 
892 
893 #ifndef _SYS_SYSPROTO_H_
894 struct  __getcwd_args {
895 	u_char	*buf;
896 	u_int	buflen;
897 };
898 #endif
899 
900 /*
901  * XXX All of these sysctls would probably be more productive dead.
902  */
903 static int disablecwd;
904 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
905    "Disable the getcwd syscall");
906 
907 /* Implementation of the getcwd syscall. */
908 int
909 __getcwd(td, uap)
910 	struct thread *td;
911 	struct __getcwd_args *uap;
912 {
913 
914 	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen));
915 }
916 
917 int
918 kern___getcwd(struct thread *td, u_char *buf, enum uio_seg bufseg, u_int buflen)
919 {
920 	char *bp, *tmpbuf;
921 	struct filedesc *fdp;
922 	struct vnode *cdir, *rdir;
923 	int error, vfslocked;
924 
925 	if (disablecwd)
926 		return (ENODEV);
927 	if (buflen < 2)
928 		return (EINVAL);
929 	if (buflen > MAXPATHLEN)
930 		buflen = MAXPATHLEN;
931 
932 	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
933 	fdp = td->td_proc->p_fd;
934 	FILEDESC_SLOCK(fdp);
935 	cdir = fdp->fd_cdir;
936 	VREF(cdir);
937 	rdir = fdp->fd_rdir;
938 	VREF(rdir);
939 	FILEDESC_SUNLOCK(fdp);
940 	error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen);
941 	vfslocked = VFS_LOCK_GIANT(rdir->v_mount);
942 	vrele(rdir);
943 	VFS_UNLOCK_GIANT(vfslocked);
944 	vfslocked = VFS_LOCK_GIANT(cdir->v_mount);
945 	vrele(cdir);
946 	VFS_UNLOCK_GIANT(vfslocked);
947 
948 	if (!error) {
949 		if (bufseg == UIO_SYSSPACE)
950 			bcopy(bp, buf, strlen(bp) + 1);
951 		else
952 			error = copyout(bp, buf, strlen(bp) + 1);
953 #ifdef KTRACE
954 		if (KTRPOINT(curthread, KTR_NAMEI))
955 			ktrnamei(bp);
956 #endif
957 	}
958 	free(tmpbuf, M_TEMP);
959 	return (error);
960 }
961 
962 /*
963  * Thus begins the fullpath magic.
964  */
965 
966 #undef STATNODE
967 #define STATNODE(name)							\
968 	static u_int name;						\
969 	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")
970 
971 static int disablefullpath;
972 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
973 	"Disable the vn_fullpath function");
974 
975 /* These count for kern___getcwd(), too. */
976 STATNODE(numfullpathcalls);
977 STATNODE(numfullpathfail1);
978 STATNODE(numfullpathfail2);
979 STATNODE(numfullpathfail4);
980 STATNODE(numfullpathfound);
981 
982 /*
983  * Retrieve the full filesystem path that corresponds to a vnode from the
984  * name cache (if available).
985  */
986 int
987 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
988 {
989 	char *buf;
990 	struct filedesc *fdp;
991 	struct vnode *rdir;
992 	int error, vfslocked;
993 
994 	if (disablefullpath)
995 		return (ENODEV);
996 	if (vn == NULL)
997 		return (EINVAL);
998 
999 	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
1000 	fdp = td->td_proc->p_fd;
1001 	FILEDESC_SLOCK(fdp);
1002 	rdir = fdp->fd_rdir;
1003 	VREF(rdir);
1004 	FILEDESC_SUNLOCK(fdp);
1005 	error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN);
1006 	vfslocked = VFS_LOCK_GIANT(rdir->v_mount);
1007 	vrele(rdir);
1008 	VFS_UNLOCK_GIANT(vfslocked);
1009 
1010 	if (!error)
1011 		*freebuf = buf;
1012 	else
1013 		free(buf, M_TEMP);
1014 	return (error);
1015 }
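
/*
 * Illustrative sketch of the retbuf/freebuf protocol used by callers of
 * vn_fullpath(): *retbuf points into the middle of the allocated buffer,
 * so it is *freebuf that must be freed:
 *
 *	char *fullpath, *freepath;
 *
 *	if (vn_fullpath(td, vp, &fullpath, &freepath) == 0) {
 *		...use fullpath...
 *		free(freepath, M_TEMP);
 *	}
 */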
1016 
1017 /*
1018  * This function is similar to vn_fullpath, but it attempts to lookup the
1019  * pathname relative to the global root mount point.  This is required for the
1020  * auditing sub-system, as audited pathnames must be absolute, relative to the
1021  * global root mount point.
1022  */
1023 int
1024 vn_fullpath_global(struct thread *td, struct vnode *vn,
1025     char **retbuf, char **freebuf)
1026 {
1027 	char *buf;
1028 	int error;
1029 
1030 	if (disablefullpath)
1031 		return (ENODEV);
1032 	if (vn == NULL)
1033 		return (EINVAL);
1034 	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
1035 	error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
1036 	if (!error)
1037 		*freebuf = buf;
1038 	else
1039 		free(buf, M_TEMP);
1040 	return (error);
1041 }
1042 
1043 int
1044 vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)
1045 {
1046 	int error;
1047 
1048 	CACHE_RLOCK();
1049 	error = vn_vptocnp_locked(vp, cred, buf, buflen);
1050 	if (error == 0) {
1051 		/*
1052 		 * vn_vptocnp_locked() dropped hold acquired by
1053 		 * VOP_VPTOCNP immediately after locking the
1054 		 * cache. Since we are going to drop the cache rlock,
1055 		 * re-hold the result.
1056 		 */
1057 		vhold(*vp);
1058 		CACHE_RUNLOCK();
1059 	}
1060 	return (error);
1061 }
1062 
1063 static int
1064 vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
1065     u_int *buflen)
1066 {
1067 	struct vnode *dvp;
1068 	struct namecache *ncp;
1069 	int error, vfslocked;
1070 
1071 	TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) {
1072 		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
1073 			break;
1074 	}
1075 	if (ncp != NULL) {
1076 		if (*buflen < ncp->nc_nlen) {
1077 			CACHE_RUNLOCK();
1078 			numfullpathfail4++;
1079 			error = ENOMEM;
1080 			SDT_PROBE(vfs, namecache, fullpath, return, error,
1081 			    vp, NULL, 0, 0);
1082 			return (error);
1083 		}
1084 		*buflen -= ncp->nc_nlen;
1085 		memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen);
1086 		SDT_PROBE(vfs, namecache, fullpath, hit, ncp->nc_dvp,
1087 		    ncp->nc_name, vp, 0, 0);
1088 		*vp = ncp->nc_dvp;
1089 		return (0);
1090 	}
1091 	SDT_PROBE(vfs, namecache, fullpath, miss, vp, 0, 0, 0, 0);
1092 
1093 	vhold(*vp);
1094 	CACHE_RUNLOCK();
1095 	vfslocked = VFS_LOCK_GIANT((*vp)->v_mount);
1096 	vn_lock(*vp, LK_SHARED | LK_RETRY);
1097 	error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
1098 	VOP_UNLOCK(*vp, 0);
1099 	vdrop(*vp);
1100 	VFS_UNLOCK_GIANT(vfslocked);
1101 	if (error) {
1102 		numfullpathfail2++;
1103 		SDT_PROBE(vfs, namecache, fullpath, return,  error, vp,
1104 		    NULL, 0, 0);
1105 		return (error);
1106 	}
1107 
1108 	*vp = dvp;
1109 	CACHE_RLOCK();
1110 	if ((*vp)->v_iflag & VI_DOOMED) {
1111 		/* forced unmount */
1112 		CACHE_RUNLOCK();
1113 		vdrop(*vp);
1114 		error = ENOENT;
1115 		SDT_PROBE(vfs, namecache, fullpath, return, error, vp,
1116 		    NULL, 0, 0);
1117 		return (error);
1118 	}
1119 	vdrop(*vp);
1120 
1121 	return (0);
1122 }
1123 
1124 /*
1125  * The magic behind kern___getcwd() and vn_fullpath().
1126  */
1127 static int
1128 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
1129     char *buf, char **retbuf, u_int buflen)
1130 {
1131 	int error, slash_prefixed;
1132 #ifdef KDTRACE_HOOKS
1133 	struct vnode *startvp = vp;
1134 #endif
1135 
1136 	buflen--;
1137 	buf[buflen] = '\0';
1138 	error = 0;
1139 	slash_prefixed = 0;
1140 
1141 	SDT_PROBE(vfs, namecache, fullpath, entry, vp, 0, 0, 0, 0);
1142 	numfullpathcalls++;
1143 	CACHE_RLOCK();
1144 	if (vp->v_type != VDIR) {
1145 		error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen);
1146 		if (error)
1147 			return (error);
1148 		if (buflen == 0) {
1149 			CACHE_RUNLOCK();
1150 			return (ENOMEM);
1151 		}
1152 		buf[--buflen] = '/';
1153 		slash_prefixed = 1;
1154 	}
1155 	while (vp != rdir && vp != rootvnode) {
1156 		if (vp->v_vflag & VV_ROOT) {
1157 			if (vp->v_iflag & VI_DOOMED) {	/* forced unmount */
1158 				CACHE_RUNLOCK();
1159 				error = ENOENT;
1160 				SDT_PROBE(vfs, namecache, fullpath, return,
1161 				    error, vp, NULL, 0, 0);
1162 				break;
1163 			}
1164 			vp = vp->v_mount->mnt_vnodecovered;
1165 			continue;
1166 		}
1167 		if (vp->v_type != VDIR) {
1168 			CACHE_RUNLOCK();
1169 			numfullpathfail1++;
1170 			error = ENOTDIR;
1171 			SDT_PROBE(vfs, namecache, fullpath, return,
1172 			    error, vp, NULL, 0, 0);
1173 			break;
1174 		}
1175 		error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen);
1176 		if (error)
1177 			break;
1178 		if (buflen == 0) {
1179 			CACHE_RUNLOCK();
1180 			error = ENOMEM;
1181 			SDT_PROBE(vfs, namecache, fullpath, return, error,
1182 			    startvp, NULL, 0, 0);
1183 			break;
1184 		}
1185 		buf[--buflen] = '/';
1186 		slash_prefixed = 1;
1187 	}
1188 	if (error)
1189 		return (error);
1190 	if (!slash_prefixed) {
1191 		if (buflen == 0) {
1192 			CACHE_RUNLOCK();
1193 			numfullpathfail4++;
1194 			SDT_PROBE(vfs, namecache, fullpath, return, ENOMEM,
1195 			    startvp, NULL, 0, 0);
1196 			return (ENOMEM);
1197 		}
1198 		buf[--buflen] = '/';
1199 	}
1200 	numfullpathfound++;
1201 	CACHE_RUNLOCK();
1202 
1203 	SDT_PROBE(vfs, namecache, fullpath, return, 0, startvp, buf + buflen,
1204 	    0, 0);
1205 	*retbuf = buf + buflen;
1206 	return (0);
1207 }
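
/*
 * Illustrative example: the path is assembled backwards, one component per
 * vn_vptocnp_locked() call, starting at the end of the caller's buffer.
 * Resolving the directory /usr/src with a 16-byte buffer would proceed
 * roughly as follows ("0" marks the nul terminator):
 *
 *	initial:	...............0
 *	after "src":	............src0
 *	prepend '/':	.........../src0
 *	after "usr":	........usr/src0
 *	prepend '/':	......./usr/src0
 *
 * and *retbuf is left pointing at the leading '/'.
 */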
1208 
1209 int
1210 vn_commname(struct vnode *vp, char *buf, u_int buflen)
1211 {
1212 	struct namecache *ncp;
1213 	int l;
1214 
1215 	CACHE_RLOCK();
1216 	TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst)
1217 		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
1218 			break;
1219 	if (ncp == NULL) {
1220 		CACHE_RUNLOCK();
1221 		return (ENOENT);
1222 	}
1223 	l = min(ncp->nc_nlen, buflen - 1);
1224 	memcpy(buf, ncp->nc_name, l);
1225 	CACHE_RUNLOCK();
1226 	buf[l] = '\0';
1227 	return (0);
1228 }
1229