xref: /freebsd/sys/kern/vfs_cache.c (revision f0574f5cf69e168cc4ea71ebbe5fdec9ec9a3dfe)
1 /*-
2  * Copyright (c) 1989, 1993, 1995
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Poul-Henning Kamp of the FreeBSD Project.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include "opt_ktrace.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/counter.h>
43 #include <sys/filedesc.h>
44 #include <sys/fnv_hash.h>
45 #include <sys/kernel.h>
46 #include <sys/lock.h>
47 #include <sys/malloc.h>
48 #include <sys/fcntl.h>
49 #include <sys/mount.h>
50 #include <sys/namei.h>
51 #include <sys/proc.h>
52 #include <sys/rwlock.h>
53 #include <sys/sdt.h>
54 #include <sys/smp.h>
55 #include <sys/syscallsubr.h>
56 #include <sys/sysctl.h>
57 #include <sys/sysproto.h>
58 #include <sys/vnode.h>
59 #ifdef KTRACE
60 #include <sys/ktrace.h>
61 #endif
62 
63 #include <vm/uma.h>
64 
65 SDT_PROVIDER_DECLARE(vfs);
66 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *",
67     "struct vnode *");
68 SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *",
69     "char *");
70 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *");
71 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *",
72     "char *", "struct vnode *");
73 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *");
74 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int",
75     "struct vnode *", "char *");
76 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *",
77     "struct vnode *");
78 SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative,
79     "struct vnode *", "char *");
80 SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *",
81     "char *");
82 SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *");
83 SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *");
84 SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *");
85 SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *",
86     "struct vnode *");
87 SDT_PROBE_DEFINE3(vfs, namecache, zap_negative, done, "struct vnode *",
88     "char *", "int");
89 SDT_PROBE_DEFINE3(vfs, namecache, shrink_negative, done, "struct vnode *",
90     "char *", "int");
91 
92 /*
93  * This structure describes the elements in the cache of recent
94  * names looked up by namei.
95  */
96 
97 struct	namecache {
98 	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
99 	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
100 	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
101 	struct	vnode *nc_dvp;		/* vnode of parent of name */
102 	union {
103 		struct	vnode *nu_vp;	/* vnode the name refers to */
104 		u_int	nu_neghits;	/* negative entry hits */
105 	} n_un;
106 	u_char	nc_flag;		/* flag bits */
107 	u_char	nc_nlen;		/* length of name */
108 	char	nc_name[0];		/* segment name + nul */
109 };
110 
111 /*
112  * struct namecache_ts repeats struct namecache layout up to the
113  * nc_nlen member.
114  * struct namecache_ts is used in place of struct namecache when timestamps
115  * need to be stored.  The nc_dotdottime field is used when a cache entry maps
116  * both a non-dotdot directory name and dotdot for the directory's
117  * parent.
118  */
119 struct	namecache_ts {
120 	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
121 	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
122 	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
123 	struct	vnode *nc_dvp;		/* vnode of parent of name */
124 	union {
125 		struct	vnode *nu_vp;	/* vnode the name refers to */
126 		u_int	nu_neghits;	/* negative entry hits */
127 	} n_un;
128 	u_char	nc_flag;		/* flag bits */
129 	u_char	nc_nlen;		/* length of name */
130 	struct	timespec nc_time;	/* timespec provided by fs */
131 	struct	timespec nc_dotdottime;	/* dotdot timespec provided by fs */
132 	int	nc_ticks;		/* ticks value when entry was added */
133 	char	nc_name[0];		/* segment name + nul */
134 };
135 
136 #define	nc_vp		n_un.nu_vp
137 #define	nc_neghits	n_un.nu_neghits
138 
139 /*
140  * Flags in namecache.nc_flag
141  */
142 #define NCF_WHITE	0x01
143 #define NCF_ISDOTDOT	0x02
144 #define	NCF_TS		0x04
145 #define	NCF_DTS		0x08
146 #define	NCF_DVDROP	0x10
147 #define	NCF_NEGATIVE	0x20
148 #define	NCF_HOTNEGATIVE	0x40
149 
150 /*
151  * Name caching works as follows:
152  *
153  * Names found by directory scans are retained in a cache
154  * for future reference.  The cache is managed LRU, so frequently
155  * used names will hang around.  It is indexed by a hash value
156  * obtained from (vp, name), where vp refers to the directory
157  * containing the name.
158  *
159  * If it is a "negative" entry, (i.e. for a name that is known NOT to
160  * exist) the vnode pointer will be NULL.
161  *
162  * Upon reaching the last segment of a path, if the reference
163  * is for DELETE, or NOCACHE is set (rewrite), and the
164  * name is located in the cache, it will be dropped.
165  *
166  * These locks are used (in the order in which they can be taken):
167  * NAME		TYPE	ROLE
168  * vnodelock	mtx	vnode lists and v_cache_dd field protection
169  * bucketlock	rwlock	for access to given set of hash buckets
170  * neglist	mtx	negative entry LRU management
171  *
172  * Additionally, ncneg_shrink_lock mtx is used to have at most one thread
173  * shrinking the LRU list.
174  *
175  * It is legal to take multiple vnodelock and bucketlock locks. The locking
176  * order is lower address first. Both are recursive.
177  *
178  * "." lookups are lockless.
179  *
180  * ".." and vnode -> name lookups require vnodelock.
181  *
182  * name -> vnode lookup requires the relevant bucketlock to be held for reading.
183  *
184  * Insertions and removals of entries require involved vnodes and bucketlocks
185  * to be write-locked to prevent other threads from seeing the entry.
186  *
187  * Some lookups result in removal of the found entry (e.g. getting rid of a
188  * negative entry with the intent to create a positive one), which poses a
189  * problem when multiple threads reach that state at the same time. Similarly,
190  * two threads can purge two different vnodes and try to remove the same name.
191  *
192  * If the already held vnode lock is lower than the second required lock, we
193  * can just take the other lock. However, the opposite case could deadlock. It
194  * is resolved by trylocking and, if that fails, unlocking the first lock,
195  * then locking everything in address order and revalidating the state.
196  */
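
/*
 * As an illustration of the relocking dance described above (a minimal
 * sketch with hypothetical lock pointers vlp1 and vlp2, where vlp1 is
 * already held and vlp2 is also needed):
 *
 *	if (vlp1 < vlp2) {
 *		mtx_lock(vlp2);			(correct order, just lock)
 *	} else if (!mtx_trylock(vlp2)) {	(wrong order, try first)
 *		mtx_unlock(vlp1);
 *		mtx_lock(vlp2);			(relock in address order)
 *		mtx_lock(vlp1);
 *		(... revalidate any state read under the previous hold of vlp1)
 *	}
 */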
197 
198 /*
199  * Structures associated with name caching.
200  */
201 #define NCHHASH(hash) \
202 	(&nchashtbl[(hash) & nchash])
203 static __read_mostly LIST_HEAD(nchashhead, namecache) *nchashtbl;/* Hash Table */
204 static u_long __read_mostly	nchash;			/* size of hash table */
205 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
206     "Size of namecache hash table");
207 static u_long __read_mostly	ncnegfactor = 16; /* ratio of negative entries */
208 SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
209     "Ratio of negative namecache entries");
210 static u_long __exclusive_cache_line	numneg;	/* number of negative entries allocated */
211 SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
212     "Number of negative entries in namecache");
213 static u_long __exclusive_cache_line	numcache;/* number of cache entries allocated */
214 SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
215     "Number of namecache entries");
216 static u_long __exclusive_cache_line	numcachehv;/* number of cache entries with vnodes held */
217 SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0,
218     "Number of namecache entries with vnodes held");
219 u_int __read_mostly	ncsizefactor = 2;
220 SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0,
221     "Size factor for namecache");
222 static u_int __read_mostly	ncpurgeminvnodes;
223 SYSCTL_UINT(_vfs, OID_AUTO, ncpurgeminvnodes, CTLFLAG_RW, &ncpurgeminvnodes, 0,
224     "Number of vnodes below which purgevfs ignores the request");
225 static u_int __read_mostly	ncneghitsrequeue = 8;
226 SYSCTL_UINT(_vfs, OID_AUTO, ncneghitsrequeue, CTLFLAG_RW, &ncneghitsrequeue, 0,
227     "Number of hits to requeue a negative entry in the LRU list");
228 
229 struct nchstats	nchstats;		/* cache effectiveness statistics */
230 
231 static struct mtx       ncneg_shrink_lock;
232 static int	shrink_list_turn;
233 
234 struct neglist {
235 	struct mtx		nl_lock;
236 	TAILQ_HEAD(, namecache) nl_list;
237 } __aligned(CACHE_LINE_SIZE);
238 
239 static struct neglist __read_mostly	*neglists;
240 static struct neglist ncneg_hot;
241 
242 #define	numneglists (ncneghash + 1)
243 static u_int __read_mostly	ncneghash;
244 static inline struct neglist *
245 NCP2NEGLIST(struct namecache *ncp)
246 {
247 
248 	return (&neglists[(((uintptr_t)(ncp) >> 8) & ncneghash)]);
249 }
250 
251 #define	numbucketlocks (ncbuckethash + 1)
252 static u_int __read_mostly  ncbuckethash;
253 static struct rwlock_padalign __read_mostly  *bucketlocks;
254 #define	HASH2BUCKETLOCK(hash) \
255 	((struct rwlock *)(&bucketlocks[((hash) & ncbuckethash)]))
256 
257 #define	numvnodelocks (ncvnodehash + 1)
258 static u_int __read_mostly  ncvnodehash;
259 static struct mtx __read_mostly *vnodelocks;
260 static inline struct mtx *
261 VP2VNODELOCK(struct vnode *vp)
262 {
263 
264 	return (&vnodelocks[(((uintptr_t)(vp) >> 8) & ncvnodehash)]);
265 }
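
/*
 * Example of the striping performed above: with ncvnodehash == 255, a
 * vnode at address 0x...4300 maps to vnodelocks[(0x...4300 >> 8) & 255],
 * i.e. vnodelocks[0x43].  Shifting by 8 discards the low bits, which are
 * largely constant due to allocator alignment, so consecutively allocated
 * vnodes still spread across different mutexes.
 */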
266 
267 /*
268  * UMA zones for the VFS cache.
269  *
270  * The small cache is used for entries with short names, which are the
271  * most common.  The large cache is used for entries which are too big to
272  * fit in the small cache.
273  */
274 static uma_zone_t __read_mostly cache_zone_small;
275 static uma_zone_t __read_mostly cache_zone_small_ts;
276 static uma_zone_t __read_mostly cache_zone_large;
277 static uma_zone_t __read_mostly cache_zone_large_ts;
278 
279 #define	CACHE_PATH_CUTOFF	35
280 
281 static struct namecache *
282 cache_alloc(int len, int ts)
283 {
284 
285 	if (len > CACHE_PATH_CUTOFF) {
286 		if (ts)
287 			return (uma_zalloc(cache_zone_large_ts, M_WAITOK));
288 		else
289 			return (uma_zalloc(cache_zone_large, M_WAITOK));
290 	}
291 	if (ts)
292 		return (uma_zalloc(cache_zone_small_ts, M_WAITOK));
293 	else
294 		return (uma_zalloc(cache_zone_small, M_WAITOK));
295 }
296 
297 static void
298 cache_free(struct namecache *ncp)
299 {
300 	int ts;
301 
302 	if (ncp == NULL)
303 		return;
304 	ts = ncp->nc_flag & NCF_TS;
305 	if ((ncp->nc_flag & NCF_DVDROP) != 0)
306 		vdrop(ncp->nc_dvp);
307 	if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) {
308 		if (ts)
309 			uma_zfree(cache_zone_small_ts, ncp);
310 		else
311 			uma_zfree(cache_zone_small, ncp);
312 	} else if (ts)
313 		uma_zfree(cache_zone_large_ts, ncp);
314 	else
315 		uma_zfree(cache_zone_large, ncp);
316 }
317 
318 static char *
319 nc_get_name(struct namecache *ncp)
320 {
321 	struct namecache_ts *ncp_ts;
322 
323 	if ((ncp->nc_flag & NCF_TS) == 0)
324 		return (ncp->nc_name);
325 	ncp_ts = (struct namecache_ts *)ncp;
326 	return (ncp_ts->nc_name);
327 }
328 
329 static void
330 cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp)
331 {
332 
333 	KASSERT((ncp->nc_flag & NCF_TS) != 0 ||
334 	    (tsp == NULL && ticksp == NULL),
335 	    ("No NCF_TS"));
336 
337 	if (tsp != NULL)
338 		*tsp = ((struct namecache_ts *)ncp)->nc_time;
339 	if (ticksp != NULL)
340 		*ticksp = ((struct namecache_ts *)ncp)->nc_ticks;
341 }
342 
343 static int __read_mostly	doingcache = 1;	/* 1 => enable the cache */
344 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0,
345     "VFS namecache enabled");
346 
347 /* Export size information to userland */
348 SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR,
349     sizeof(struct namecache), "sizeof(struct namecache)");
350 
351 /*
352  * The new name cache statistics
353  */
354 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0,
355     "Name cache statistics");
356 #define STATNODE_ULONG(name, descr)	\
357 	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr);
358 #define STATNODE_COUNTER(name, descr)	\
359 	static counter_u64_t __read_mostly name; \
360 	SYSCTL_COUNTER_U64(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, descr);
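
/*
 * For example, STATNODE_COUNTER(numcalls, "Number of cache lookups") below
 * expands to roughly the following (illustrative expansion):
 *
 *	static counter_u64_t __read_mostly numcalls;
 *	SYSCTL_COUNTER_U64(_vfs_cache, OID_AUTO, numcalls, CTLFLAG_RD,
 *	    &numcalls, "Number of cache lookups");
 *
 * i.e. a per-CPU counter plus a read-only sysctl under vfs.cache exporting
 * it; the counter itself is allocated later in nchinit().
 */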
361 STATNODE_ULONG(numneg, "Number of negative cache entries");
362 STATNODE_ULONG(numcache, "Number of cache entries");
363 STATNODE_COUNTER(numcalls, "Number of cache lookups");
364 STATNODE_COUNTER(dothits, "Number of '.' hits");
365 STATNODE_COUNTER(dotdothits, "Number of '..' hits");
366 STATNODE_COUNTER(numchecks, "Number of checks in lookup");
367 STATNODE_COUNTER(nummiss, "Number of cache misses");
368 STATNODE_COUNTER(nummisszap, "Number of cache misses we do not want to cache");
369 STATNODE_COUNTER(numposzaps,
370     "Number of cache hits (positive) we do not want to cache");
371 STATNODE_COUNTER(numposhits, "Number of cache hits (positive)");
372 STATNODE_COUNTER(numnegzaps,
373     "Number of cache hits (negative) we do not want to cache");
374 STATNODE_COUNTER(numneghits, "Number of cache hits (negative)");
375 /* These count for kern___getcwd(), too. */
376 STATNODE_COUNTER(numfullpathcalls, "Number of fullpath search calls");
377 STATNODE_COUNTER(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)");
378 STATNODE_COUNTER(numfullpathfail2,
379     "Number of fullpath search errors (VOP_VPTOCNP failures)");
380 STATNODE_COUNTER(numfullpathfail4, "Number of fullpath search errors (ENOMEM)");
381 STATNODE_COUNTER(numfullpathfound, "Number of successful fullpath calls");
382 static long zap_and_exit_bucket_fail; STATNODE_ULONG(zap_and_exit_bucket_fail,
383     "Number of times zap_and_exit failed to lock");
384 static long cache_lock_vnodes_cel_3_failures;
385 STATNODE_ULONG(cache_lock_vnodes_cel_3_failures,
386     "Number of times 3-way vnode locking failed");
387 
388 static void cache_zap_locked(struct namecache *ncp, bool neg_locked);
389 static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
390     char *buf, char **retbuf, u_int buflen);
391 
392 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
393 
394 static int cache_yield;
395 SYSCTL_INT(_vfs_cache, OID_AUTO, yield, CTLFLAG_RD, &cache_yield, 0,
396     "Number of times cache called yield");
397 
398 static void
399 cache_maybe_yield(void)
400 {
401 
402 	if (should_yield()) {
403 		cache_yield++;
404 		kern_yield(PRI_USER);
405 	}
406 }
407 
408 static inline void
409 cache_assert_vlp_locked(struct mtx *vlp)
410 {
411 
412 	if (vlp != NULL)
413 		mtx_assert(vlp, MA_OWNED);
414 }
415 
416 static inline void
417 cache_assert_vnode_locked(struct vnode *vp)
418 {
419 	struct mtx *vlp;
420 
421 	vlp = VP2VNODELOCK(vp);
422 	cache_assert_vlp_locked(vlp);
423 }
424 
425 static uint32_t
426 cache_get_hash(char *name, u_char len, struct vnode *dvp)
427 {
428 	uint32_t hash;
429 
430 	hash = fnv_32_buf(name, len, FNV1_32_INIT);
431 	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
432 	return (hash);
433 }
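
/*
 * The hash mixes the name first and the directory vnode pointer second, so
 * identical names under different directories land in different chains.
 * Typical use in this file:
 *
 *	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
 *	blp = HASH2BUCKETLOCK(hash);
 *	ncpp = NCHHASH(hash);
 */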
434 
435 static inline struct rwlock *
436 NCP2BUCKETLOCK(struct namecache *ncp)
437 {
438 	uint32_t hash;
439 
440 	hash = cache_get_hash(nc_get_name(ncp), ncp->nc_nlen, ncp->nc_dvp);
441 	return (HASH2BUCKETLOCK(hash));
442 }
443 
444 #ifdef INVARIANTS
445 static void
446 cache_assert_bucket_locked(struct namecache *ncp, int mode)
447 {
448 	struct rwlock *blp;
449 
450 	blp = NCP2BUCKETLOCK(ncp);
451 	rw_assert(blp, mode);
452 }
453 #else
454 #define cache_assert_bucket_locked(x, y) do { } while (0)
455 #endif
456 
457 #define cache_sort(x, y)	_cache_sort((void **)(x), (void **)(y))
458 static void
459 _cache_sort(void **p1, void **p2)
460 {
461 	void *tmp;
462 
463 	if (*p1 > *p2) {
464 		tmp = *p2;
465 		*p2 = *p1;
466 		*p1 = tmp;
467 	}
468 }
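
/*
 * Example: given two vnode locks obtained in arbitrary order,
 *
 *	vlp1 = VP2VNODELOCK(dvp);
 *	vlp2 = VP2VNODELOCK(vp);
 *	cache_sort(&vlp1, &vlp2);
 *
 * leaves vlp1 <= vlp2, matching the lower-address-first locking order.  A
 * NULL pointer compares lowest and thus always ends up in the first slot.
 */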
469 
470 static void
471 cache_lock_all_buckets(void)
472 {
473 	u_int i;
474 
475 	for (i = 0; i < numbucketlocks; i++)
476 		rw_wlock(&bucketlocks[i]);
477 }
478 
479 static void
480 cache_unlock_all_buckets(void)
481 {
482 	u_int i;
483 
484 	for (i = 0; i < numbucketlocks; i++)
485 		rw_wunlock(&bucketlocks[i]);
486 }
487 
488 static void
489 cache_lock_all_vnodes(void)
490 {
491 	u_int i;
492 
493 	for (i = 0; i < numvnodelocks; i++)
494 		mtx_lock(&vnodelocks[i]);
495 }
496 
497 static void
498 cache_unlock_all_vnodes(void)
499 {
500 	u_int i;
501 
502 	for (i = 0; i < numvnodelocks; i++)
503 		mtx_unlock(&vnodelocks[i]);
504 }
505 
506 static int
507 cache_trylock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
508 {
509 
510 	cache_sort(&vlp1, &vlp2);
511 	MPASS(vlp2 != NULL);
512 
513 	if (vlp1 != NULL) {
514 		if (!mtx_trylock(vlp1))
515 			return (EAGAIN);
516 	}
517 	if (!mtx_trylock(vlp2)) {
518 		if (vlp1 != NULL)
519 			mtx_unlock(vlp1);
520 		return (EAGAIN);
521 	}
522 
523 	return (0);
524 }
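
/*
 * Callers must be prepared for EAGAIN, typically by dropping whatever
 * bucket lock they hold and restarting the whole operation.  A minimal
 * sketch of the pattern (hypothetical caller; the real callers in this
 * file propagate EAGAIN upward and retry the entire lookup):
 *
 *	while (cache_trylock_vnodes(dvlp, vlp) != 0)
 *		cache_maybe_yield();
 */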
525 
526 static void
527 cache_unlock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
528 {
529 
530 	MPASS(vlp1 != NULL || vlp2 != NULL);
531 
532 	if (vlp1 != NULL)
533 		mtx_unlock(vlp1);
534 	if (vlp2 != NULL)
535 		mtx_unlock(vlp2);
536 }
537 
538 static int
539 sysctl_nchstats(SYSCTL_HANDLER_ARGS)
540 {
541 	struct nchstats snap;
542 
543 	if (req->oldptr == NULL)
544 		return (SYSCTL_OUT(req, 0, sizeof(snap)));
545 
546 	snap = nchstats;
547 	snap.ncs_goodhits = counter_u64_fetch(numposhits);
548 	snap.ncs_neghits = counter_u64_fetch(numneghits);
549 	snap.ncs_badhits = counter_u64_fetch(numposzaps) +
550 	    counter_u64_fetch(numnegzaps);
551 	snap.ncs_miss = counter_u64_fetch(nummisszap) +
552 	    counter_u64_fetch(nummiss);
553 
554 	return (SYSCTL_OUT(req, &snap, sizeof(snap)));
555 }
556 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE | CTLFLAG_RD |
557     CTLFLAG_MPSAFE, 0, 0, sysctl_nchstats, "LU",
558     "VFS cache effectiveness statistics");
559 
560 #ifdef DIAGNOSTIC
561 /*
562  * Grab an atomic snapshot of the name cache hash chain lengths
563  */
564 static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL,
565     "hash table stats");
566 
567 static int
568 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
569 {
570 	struct nchashhead *ncpp;
571 	struct namecache *ncp;
572 	int i, error, n_nchash, *cntbuf;
573 
574 retry:
575 	n_nchash = nchash + 1;	/* nchash is max index, not count */
576 	if (req->oldptr == NULL)
577 		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
578 	cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK);
579 	cache_lock_all_buckets();
580 	if (n_nchash != nchash + 1) {
581 		cache_unlock_all_buckets();
582 		free(cntbuf, M_TEMP);
583 		goto retry;
584 	}
585 	/* Scan hash tables counting entries */
586 	for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++)
587 		LIST_FOREACH(ncp, ncpp, nc_hash)
588 			cntbuf[i]++;
589 	cache_unlock_all_buckets();
590 	for (error = 0, i = 0; i < n_nchash; i++)
591 		if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0)
592 			break;
593 	free(cntbuf, M_TEMP);
594 	return (error);
595 }
596 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
597     CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
598     "nchash chain lengths");
599 
600 static int
601 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
602 {
603 	int error;
604 	struct nchashhead *ncpp;
605 	struct namecache *ncp;
606 	int n_nchash;
607 	int count, maxlength, used, pct;
608 
609 	if (!req->oldptr)
610 		return SYSCTL_OUT(req, 0, 4 * sizeof(int));
611 
612 	cache_lock_all_buckets();
613 	n_nchash = nchash + 1;	/* nchash is max index, not count */
614 	used = 0;
615 	maxlength = 0;
616 
617 	/* Scan hash tables for applicable entries */
618 	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
619 		count = 0;
620 		LIST_FOREACH(ncp, ncpp, nc_hash) {
621 			count++;
622 		}
623 		if (count)
624 			used++;
625 		if (maxlength < count)
626 			maxlength = count;
627 	}
628 	n_nchash = nchash + 1;
629 	cache_unlock_all_buckets();
630 	pct = (used * 100) / (n_nchash / 100);	/* approx. usage in 0.01% units */
631 	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
632 	if (error)
633 		return (error);
634 	error = SYSCTL_OUT(req, &used, sizeof(used));
635 	if (error)
636 		return (error);
637 	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
638 	if (error)
639 		return (error);
640 	error = SYSCTL_OUT(req, &pct, sizeof(pct));
641 	if (error)
642 		return (error);
643 	return (0);
644 }
645 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
646     CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
647     "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)");
648 #endif
649 
650 /*
651  * Negative entries management
652  *
653  * A variation of the LRU scheme is used. New entries are hashed into one of
654  * numneglists cold lists. Entries get promoted to the hot list on first hit.
655  * Partial LRU for the hot list is maintained by requeueing entries every
656  * ncneghitsrequeue hits.
657  *
658  * The shrinker will demote the hot list head and evict from the cold lists
659  * in a round-robin manner.
660  */
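
/*
 * For example, with the default ncneghitsrequeue of 8, a hot entry is
 * requeued to the tail of the hot list only when its pre-increment hit
 * count is a multiple of 8 (see cache_negative_hit() below):
 *
 *	hits = atomic_fetchadd_int(&ncp->nc_neghits, 1);
 *	if ((hits % ncneghitsrequeue) != 0)
 *		return;		(skip the requeue and its lock traffic)
 *
 * which bounds ncneg_hot.nl_lock acquisitions to roughly 1 in 8 hits per
 * hot entry.
 */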
661 static void
662 cache_negative_hit(struct namecache *ncp)
663 {
664 	struct neglist *neglist;
665 	u_int hits;
666 
667 	MPASS(ncp->nc_flag & NCF_NEGATIVE);
668 	hits = atomic_fetchadd_int(&ncp->nc_neghits, 1);
669 	if (ncp->nc_flag & NCF_HOTNEGATIVE) {
670 		if ((hits % ncneghitsrequeue) != 0)
671 			return;
672 		mtx_lock(&ncneg_hot.nl_lock);
673 		if (ncp->nc_flag & NCF_HOTNEGATIVE) {
674 			TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
675 			TAILQ_INSERT_TAIL(&ncneg_hot.nl_list, ncp, nc_dst);
676 			mtx_unlock(&ncneg_hot.nl_lock);
677 			return;
678 		}
679 		/*
680 		 * The shrinker cleared the flag and removed the entry from
681 		 * the hot list. Put it back.
682 		 */
683 	} else {
684 		mtx_lock(&ncneg_hot.nl_lock);
685 	}
686 	neglist = NCP2NEGLIST(ncp);
687 	mtx_lock(&neglist->nl_lock);
688 	if (!(ncp->nc_flag & NCF_HOTNEGATIVE)) {
689 		TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
690 		TAILQ_INSERT_TAIL(&ncneg_hot.nl_list, ncp, nc_dst);
691 		ncp->nc_flag |= NCF_HOTNEGATIVE;
692 	}
693 	mtx_unlock(&neglist->nl_lock);
694 	mtx_unlock(&ncneg_hot.nl_lock);
695 }
696 
697 static void
698 cache_negative_insert(struct namecache *ncp, bool neg_locked)
699 {
700 	struct neglist *neglist;
701 
702 	MPASS(ncp->nc_flag & NCF_NEGATIVE);
703 	cache_assert_bucket_locked(ncp, RA_WLOCKED);
704 	neglist = NCP2NEGLIST(ncp);
705 	if (!neg_locked) {
706 		mtx_lock(&neglist->nl_lock);
707 	} else {
708 		mtx_assert(&neglist->nl_lock, MA_OWNED);
709 	}
710 	TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst);
711 	if (!neg_locked)
712 		mtx_unlock(&neglist->nl_lock);
713 	atomic_add_rel_long(&numneg, 1);
714 }
715 
716 static void
717 cache_negative_remove(struct namecache *ncp, bool neg_locked)
718 {
719 	struct neglist *neglist;
720 	bool hot_locked = false;
721 	bool list_locked = false;
722 
723 	MPASS(ncp->nc_flag & NCF_NEGATIVE);
724 	cache_assert_bucket_locked(ncp, RA_WLOCKED);
725 	neglist = NCP2NEGLIST(ncp);
726 	if (!neg_locked) {
727 		if (ncp->nc_flag & NCF_HOTNEGATIVE) {
728 			hot_locked = true;
729 			mtx_lock(&ncneg_hot.nl_lock);
730 			if (!(ncp->nc_flag & NCF_HOTNEGATIVE)) {
731 				list_locked = true;
732 				mtx_lock(&neglist->nl_lock);
733 			}
734 		} else {
735 			list_locked = true;
736 			mtx_lock(&neglist->nl_lock);
737 		}
738 	} else {
739 		mtx_assert(&neglist->nl_lock, MA_OWNED);
740 		mtx_assert(&ncneg_hot.nl_lock, MA_OWNED);
741 	}
742 	if (ncp->nc_flag & NCF_HOTNEGATIVE) {
743 		TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
744 	} else {
745 		TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
746 	}
747 	if (list_locked)
748 		mtx_unlock(&neglist->nl_lock);
749 	if (hot_locked)
750 		mtx_unlock(&ncneg_hot.nl_lock);
751 	atomic_subtract_rel_long(&numneg, 1);
752 }
753 
754 static void
755 cache_negative_shrink_select(int start, struct namecache **ncpp,
756     struct neglist **neglistpp)
757 {
758 	struct neglist *neglist;
759 	struct namecache *ncp;
760 	int i;
761 
762 	*ncpp = ncp = NULL;
763 
764 	for (i = start; i < numneglists; i++) {
765 		neglist = &neglists[i];
766 		if (TAILQ_FIRST(&neglist->nl_list) == NULL)
767 			continue;
768 		mtx_lock(&neglist->nl_lock);
769 		ncp = TAILQ_FIRST(&neglist->nl_list);
770 		if (ncp != NULL)
771 			break;
772 		mtx_unlock(&neglist->nl_lock);
773 	}
774 
775 	*neglistpp = neglist;
776 	*ncpp = ncp;
777 }
778 
779 static void
780 cache_negative_zap_one(void)
781 {
782 	struct namecache *ncp, *ncp2;
783 	struct neglist *neglist;
784 	struct mtx *dvlp;
785 	struct rwlock *blp;
786 
787 	if (!mtx_trylock(&ncneg_shrink_lock))
788 		return;
789 
790 	mtx_lock(&ncneg_hot.nl_lock);
791 	ncp = TAILQ_FIRST(&ncneg_hot.nl_list);
792 	if (ncp != NULL) {
793 		neglist = NCP2NEGLIST(ncp);
794 		mtx_lock(&neglist->nl_lock);
795 		TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
796 		TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst);
797 		ncp->nc_flag &= ~NCF_HOTNEGATIVE;
798 		mtx_unlock(&neglist->nl_lock);
799 	}
800 
801 	cache_negative_shrink_select(shrink_list_turn, &ncp, &neglist);
802 	shrink_list_turn++;
803 	if (shrink_list_turn == numneglists)
804 		shrink_list_turn = 0;
805 	if (ncp == NULL && shrink_list_turn == 0)
806 		cache_negative_shrink_select(shrink_list_turn, &ncp, &neglist);
807 	if (ncp == NULL) {
808 		mtx_unlock(&ncneg_hot.nl_lock);
809 		goto out;
810 	}
811 
812 	MPASS(ncp->nc_flag & NCF_NEGATIVE);
813 	dvlp = VP2VNODELOCK(ncp->nc_dvp);
814 	blp = NCP2BUCKETLOCK(ncp);
815 	mtx_unlock(&neglist->nl_lock);
816 	mtx_unlock(&ncneg_hot.nl_lock);
817 	mtx_lock(dvlp);
818 	rw_wlock(blp);
819 	mtx_lock(&ncneg_hot.nl_lock);
820 	mtx_lock(&neglist->nl_lock);
821 	ncp2 = TAILQ_FIRST(&neglist->nl_list);
822 	if (ncp != ncp2 || dvlp != VP2VNODELOCK(ncp2->nc_dvp) ||
823 	    blp != NCP2BUCKETLOCK(ncp2) || !(ncp2->nc_flag & NCF_NEGATIVE)) {
824 		ncp = NULL;
825 		goto out_unlock_all;
826 	}
827 	SDT_PROBE3(vfs, namecache, shrink_negative, done, ncp->nc_dvp,
828 	    nc_get_name(ncp), ncp->nc_neghits);
829 
830 	cache_zap_locked(ncp, true);
831 out_unlock_all:
832 	mtx_unlock(&neglist->nl_lock);
833 	mtx_unlock(&ncneg_hot.nl_lock);
834 	rw_wunlock(blp);
835 	mtx_unlock(dvlp);
836 out:
837 	mtx_unlock(&ncneg_shrink_lock);
838 	cache_free(ncp);
839 }
840 
841 /*
842  * cache_zap_locked():
843  *
844  *   Removes a namecache entry from the cache, whether it contains an
845  *   actual pointer to a vnode or is just a negative cache entry.
846  */
847 static void
848 cache_zap_locked(struct namecache *ncp, bool neg_locked)
849 {
850 
851 	if (!(ncp->nc_flag & NCF_NEGATIVE))
852 		cache_assert_vnode_locked(ncp->nc_vp);
853 	cache_assert_vnode_locked(ncp->nc_dvp);
854 	cache_assert_bucket_locked(ncp, RA_WLOCKED);
855 
856 	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp,
857 	    (ncp->nc_flag & NCF_NEGATIVE) ? NULL : ncp->nc_vp);
858 	if (!(ncp->nc_flag & NCF_NEGATIVE)) {
859 		SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp,
860 		    nc_get_name(ncp), ncp->nc_vp);
861 	} else {
862 		SDT_PROBE3(vfs, namecache, zap_negative, done, ncp->nc_dvp,
863 		    nc_get_name(ncp), ncp->nc_neghits);
864 	}
865 	LIST_REMOVE(ncp, nc_hash);
866 	if (!(ncp->nc_flag & NCF_NEGATIVE)) {
867 		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
868 		if (ncp == ncp->nc_vp->v_cache_dd)
869 			ncp->nc_vp->v_cache_dd = NULL;
870 	} else {
871 		cache_negative_remove(ncp, neg_locked);
872 	}
873 	if (ncp->nc_flag & NCF_ISDOTDOT) {
874 		if (ncp == ncp->nc_dvp->v_cache_dd)
875 			ncp->nc_dvp->v_cache_dd = NULL;
876 	} else {
877 		LIST_REMOVE(ncp, nc_src);
878 		if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
879 			ncp->nc_flag |= NCF_DVDROP;
880 			atomic_subtract_rel_long(&numcachehv, 1);
881 		}
882 	}
883 	atomic_subtract_rel_long(&numcache, 1);
884 }
885 
886 static void
887 cache_zap_negative_locked_vnode_kl(struct namecache *ncp, struct vnode *vp)
888 {
889 	struct rwlock *blp;
890 
891 	MPASS(ncp->nc_dvp == vp);
892 	MPASS(ncp->nc_flag & NCF_NEGATIVE);
893 	cache_assert_vnode_locked(vp);
894 
895 	blp = NCP2BUCKETLOCK(ncp);
896 	rw_wlock(blp);
897 	cache_zap_locked(ncp, false);
898 	rw_wunlock(blp);
899 }
900 
901 static bool
902 cache_zap_locked_vnode_kl2(struct namecache *ncp, struct vnode *vp,
903     struct mtx **vlpp)
904 {
905 	struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
906 	struct rwlock *blp;
907 
908 	MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
909 	cache_assert_vnode_locked(vp);
910 
911 	if (ncp->nc_flag & NCF_NEGATIVE) {
912 		if (*vlpp != NULL) {
913 			mtx_unlock(*vlpp);
914 			*vlpp = NULL;
915 		}
916 		cache_zap_negative_locked_vnode_kl(ncp, vp);
917 		return (true);
918 	}
919 
920 	pvlp = VP2VNODELOCK(vp);
921 	blp = NCP2BUCKETLOCK(ncp);
922 	vlp1 = VP2VNODELOCK(ncp->nc_dvp);
923 	vlp2 = VP2VNODELOCK(ncp->nc_vp);
924 
925 	if (*vlpp == vlp1 || *vlpp == vlp2) {
926 		to_unlock = *vlpp;
927 		*vlpp = NULL;
928 	} else {
929 		if (*vlpp != NULL) {
930 			mtx_unlock(*vlpp);
931 			*vlpp = NULL;
932 		}
933 		cache_sort(&vlp1, &vlp2);
934 		if (vlp1 == pvlp) {
935 			mtx_lock(vlp2);
936 			to_unlock = vlp2;
937 		} else {
938 			if (!mtx_trylock(vlp1))
939 				goto out_relock;
940 			to_unlock = vlp1;
941 		}
942 	}
943 	rw_wlock(blp);
944 	cache_zap_locked(ncp, false);
945 	rw_wunlock(blp);
946 	if (to_unlock != NULL)
947 		mtx_unlock(to_unlock);
948 	return (true);
949 
950 out_relock:
951 	mtx_unlock(vlp2);
952 	mtx_lock(vlp1);
953 	mtx_lock(vlp2);
954 	MPASS(*vlpp == NULL);
955 	*vlpp = vlp1;
956 	return (false);
957 }
958 
959 static int
960 cache_zap_locked_vnode(struct namecache *ncp, struct vnode *vp)
961 {
962 	struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
963 	struct rwlock *blp;
964 	int error = 0;
965 
966 	MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
967 	cache_assert_vnode_locked(vp);
968 
969 	pvlp = VP2VNODELOCK(vp);
970 	if (ncp->nc_flag & NCF_NEGATIVE) {
971 		cache_zap_negative_locked_vnode_kl(ncp, vp);
972 		goto out;
973 	}
974 
975 	blp = NCP2BUCKETLOCK(ncp);
976 	vlp1 = VP2VNODELOCK(ncp->nc_dvp);
977 	vlp2 = VP2VNODELOCK(ncp->nc_vp);
978 	cache_sort(&vlp1, &vlp2);
979 	if (vlp1 == pvlp) {
980 		mtx_lock(vlp2);
981 		to_unlock = vlp2;
982 	} else {
983 		if (!mtx_trylock(vlp1)) {
984 			error = EAGAIN;
985 			goto out;
986 		}
987 		to_unlock = vlp1;
988 	}
989 	rw_wlock(blp);
990 	cache_zap_locked(ncp, false);
991 	rw_wunlock(blp);
992 	mtx_unlock(to_unlock);
993 out:
994 	mtx_unlock(pvlp);
995 	return (error);
996 }
997 
998 static int
999 cache_zap_rlocked_bucket(struct namecache *ncp, struct rwlock *blp)
1000 {
1001 	struct mtx *dvlp, *vlp;
1002 
1003 	cache_assert_bucket_locked(ncp, RA_RLOCKED);
1004 
1005 	dvlp = VP2VNODELOCK(ncp->nc_dvp);
1006 	vlp = NULL;
1007 	if (!(ncp->nc_flag & NCF_NEGATIVE))
1008 		vlp = VP2VNODELOCK(ncp->nc_vp);
1009 	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
1010 		rw_runlock(blp);
1011 		rw_wlock(blp);
1012 		cache_zap_locked(ncp, false);
1013 		rw_wunlock(blp);
1014 		cache_unlock_vnodes(dvlp, vlp);
1015 		return (0);
1016 	}
1017 
1018 	rw_runlock(blp);
1019 	return (EAGAIN);
1020 }
1021 
1022 static int
1023 cache_zap_wlocked_bucket_kl(struct namecache *ncp, struct rwlock *blp,
1024     struct mtx **vlpp1, struct mtx **vlpp2)
1025 {
1026 	struct mtx *dvlp, *vlp;
1027 
1028 	cache_assert_bucket_locked(ncp, RA_WLOCKED);
1029 
1030 	dvlp = VP2VNODELOCK(ncp->nc_dvp);
1031 	vlp = NULL;
1032 	if (!(ncp->nc_flag & NCF_NEGATIVE))
1033 		vlp = VP2VNODELOCK(ncp->nc_vp);
1034 	cache_sort(&dvlp, &vlp);
1035 
1036 	if (*vlpp1 == dvlp && *vlpp2 == vlp) {
1037 		cache_zap_locked(ncp, false);
1038 		cache_unlock_vnodes(dvlp, vlp);
1039 		*vlpp1 = NULL;
1040 		*vlpp2 = NULL;
1041 		return (0);
1042 	}
1043 
1044 	if (*vlpp1 != NULL)
1045 		mtx_unlock(*vlpp1);
1046 	if (*vlpp2 != NULL)
1047 		mtx_unlock(*vlpp2);
1048 	*vlpp1 = NULL;
1049 	*vlpp2 = NULL;
1050 
1051 	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
1052 		cache_zap_locked(ncp, false);
1053 		cache_unlock_vnodes(dvlp, vlp);
1054 		return (0);
1055 	}
1056 
1057 	rw_wunlock(blp);
1058 	*vlpp1 = dvlp;
1059 	*vlpp2 = vlp;
1060 	if (*vlpp1 != NULL)
1061 		mtx_lock(*vlpp1);
1062 	mtx_lock(*vlpp2);
1063 	rw_wlock(blp);
1064 	return (EAGAIN);
1065 }
1066 
1067 static void
1068 cache_lookup_unlock(struct rwlock *blp, struct mtx *vlp)
1069 {
1070 
1071 	if (blp != NULL) {
1072 		rw_runlock(blp);
1073 		mtx_assert(vlp, MA_NOTOWNED);
1074 	} else {
1075 		mtx_unlock(vlp);
1076 	}
1077 }
1078 
1079 /*
1080  * Lookup an entry in the cache
1081  *
1082  * Lookup is called with dvp pointing to the directory to search,
1083  * cnp pointing to the name of the entry being sought. If the lookup
1084  * succeeds, the vnode is returned in *vpp, and a status of -1 is
1085  * returned. If the lookup determines that the name does not exist
1086  * (negative caching), a status of ENOENT is returned. If the lookup
1087  * fails, a status of zero is returned.  If the directory vnode is
1088  * recycled out from under us due to a forced unmount, a status of
1089  * ENOENT is returned.
1090  *
1091  * vpp is locked and ref'd on return.  If we're looking up DOTDOT, dvp is
1092  * unlocked.  If we're looking up . an extra ref is taken, but the lock is
1093  * not recursively acquired.
1094  */
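
/*
 * For example, a filesystem's VOP_LOOKUP wrapper typically dispatches on
 * the result as follows (an illustrative sketch, not a drop-in
 * implementation):
 *
 *	error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
 *	if (error == 0)
 *		(miss: fall back to scanning the directory)
 *	else if (error == -1)
 *		return (0);	(hit: *vpp is locked and referenced)
 *	else
 *		return (error);	(ENOENT: cached negative entry)
 */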
1095 
1096 int
1097 cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
1098     struct timespec *tsp, int *ticksp)
1099 {
1100 	struct namecache *ncp;
1101 	struct rwlock *blp;
1102 	struct mtx *dvlp, *dvlp2;
1103 	uint32_t hash;
1104 	int error, ltype;
1105 
1106 	if (__predict_false(!doingcache)) {
1107 		cnp->cn_flags &= ~MAKEENTRY;
1108 		return (0);
1109 	}
1110 retry:
1111 	blp = NULL;
1112 	dvlp = VP2VNODELOCK(dvp);
1113 	error = 0;
1114 	counter_u64_add(numcalls, 1);
1115 
1116 	if (cnp->cn_nameptr[0] == '.') {
1117 		if (cnp->cn_namelen == 1) {
1118 			*vpp = dvp;
1119 			CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
1120 			    dvp, cnp->cn_nameptr);
1121 			counter_u64_add(dothits, 1);
1122 			SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp);
1123 			if (tsp != NULL)
1124 				timespecclear(tsp);
1125 			if (ticksp != NULL)
1126 				*ticksp = ticks;
1127 			vrefact(*vpp);
1128 			/*
1129 			 * When we lookup "." we can still be asked to lock it
1130 			 * differently...
1131 			 */
1132 			ltype = cnp->cn_lkflags & LK_TYPE_MASK;
1133 			if (ltype != VOP_ISLOCKED(*vpp)) {
1134 				if (ltype == LK_EXCLUSIVE) {
1135 					vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
1136 					if ((*vpp)->v_iflag & VI_DOOMED) {
1137 						/* forced unmount */
1138 						vrele(*vpp);
1139 						*vpp = NULL;
1140 						return (ENOENT);
1141 					}
1142 				} else
1143 					vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
1144 			}
1145 			return (-1);
1146 		}
1147 		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
1148 			counter_u64_add(dotdothits, 1);
1149 			dvlp2 = NULL;
1150 			mtx_lock(dvlp);
1151 retry_dotdot:
1152 			ncp = dvp->v_cache_dd;
1153 			if (ncp == NULL) {
1154 				SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
1155 				    "..", NULL);
1156 				mtx_unlock(dvlp);
1157 				return (0);
1158 			}
1159 			if ((cnp->cn_flags & MAKEENTRY) == 0) {
1160 				if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
1161 					if (ncp->nc_dvp != dvp)
1162 						panic("dvp %p v_cache_dd %p\n", dvp, ncp);
1163 					if (!cache_zap_locked_vnode_kl2(ncp,
1164 					    dvp, &dvlp2))
1165 						goto retry_dotdot;
1166 					MPASS(dvp->v_cache_dd == NULL);
1167 					mtx_unlock(dvlp);
1168 					if (dvlp2 != NULL)
1169 						mtx_unlock(dvlp2);
1170 					cache_free(ncp);
1171 				} else {
1172 					dvp->v_cache_dd = NULL;
1173 					mtx_unlock(dvlp);
1174 					if (dvlp2 != NULL)
1175 						mtx_unlock(dvlp2);
1176 				}
1177 				return (0);
1178 			}
1179 			if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
1180 				if (ncp->nc_flag & NCF_NEGATIVE)
1181 					*vpp = NULL;
1182 				else
1183 					*vpp = ncp->nc_vp;
1184 			} else
1185 				*vpp = ncp->nc_dvp;
1186 			/* Return failure if negative entry was found. */
1187 			if (*vpp == NULL)
1188 				goto negative_success;
1189 			CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
1190 			    dvp, cnp->cn_nameptr, *vpp);
1191 			SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..",
1192 			    *vpp);
1193 			cache_out_ts(ncp, tsp, ticksp);
1194 			if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) ==
1195 			    NCF_DTS && tsp != NULL)
1196 				*tsp = ((struct namecache_ts *)ncp)->
1197 				    nc_dotdottime;
1198 			goto success;
1199 		}
1200 	}
1201 
1202 	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
1203 	blp = HASH2BUCKETLOCK(hash);
1204 	rw_rlock(blp);
1205 
1206 	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
1207 		counter_u64_add(numchecks, 1);
1208 		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
1209 		    !bcmp(nc_get_name(ncp), cnp->cn_nameptr, ncp->nc_nlen))
1210 			break;
1211 	}
1212 
1213 	/* We failed to find an entry */
1214 	if (ncp == NULL) {
1215 		SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
1216 		    NULL);
1217 		if ((cnp->cn_flags & MAKEENTRY) == 0) {
1218 			counter_u64_add(nummisszap, 1);
1219 		} else {
1220 			counter_u64_add(nummiss, 1);
1221 		}
1222 		goto unlock;
1223 	}
1224 
1225 	/* We don't want to have an entry, so dump it */
1226 	if ((cnp->cn_flags & MAKEENTRY) == 0) {
1227 		counter_u64_add(numposzaps, 1);
1228 		goto zap_and_exit;
1229 	}
1230 
1231 	/* We found a "positive" match, return the vnode */
1232 	if (!(ncp->nc_flag & NCF_NEGATIVE)) {
1233 		counter_u64_add(numposhits, 1);
1234 		*vpp = ncp->nc_vp;
1235 		CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
1236 		    dvp, cnp->cn_nameptr, *vpp, ncp);
1237 		SDT_PROBE3(vfs, namecache, lookup, hit, dvp, nc_get_name(ncp),
1238 		    *vpp);
1239 		cache_out_ts(ncp, tsp, ticksp);
1240 		goto success;
1241 	}
1242 
1243 negative_success:
1244 	/* We found a negative match, and want to create it, so purge */
1245 	if (cnp->cn_nameiop == CREATE) {
1246 		counter_u64_add(numnegzaps, 1);
1247 		goto zap_and_exit;
1248 	}
1249 
1250 	counter_u64_add(numneghits, 1);
1251 	cache_negative_hit(ncp);
1252 	if (ncp->nc_flag & NCF_WHITE)
1253 		cnp->cn_flags |= ISWHITEOUT;
1254 	SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp,
1255 	    nc_get_name(ncp));
1256 	cache_out_ts(ncp, tsp, ticksp);
1257 	cache_lookup_unlock(blp, dvlp);
1258 	return (ENOENT);
1259 
1260 success:
1261 	/*
1262 	 * On success we return a locked and ref'd vnode as per the lookup
1263 	 * protocol.
1264 	 */
1265 	MPASS(dvp != *vpp);
1266 	ltype = 0;	/* silence gcc warning */
1267 	if (cnp->cn_flags & ISDOTDOT) {
1268 		ltype = VOP_ISLOCKED(dvp);
1269 		VOP_UNLOCK(dvp, 0);
1270 	}
1271 	vhold(*vpp);
1272 	cache_lookup_unlock(blp, dvlp);
1273 	error = vget(*vpp, cnp->cn_lkflags | LK_VNHELD, cnp->cn_thread);
1274 	if (cnp->cn_flags & ISDOTDOT) {
1275 		vn_lock(dvp, ltype | LK_RETRY);
1276 		if (dvp->v_iflag & VI_DOOMED) {
1277 			if (error == 0)
1278 				vput(*vpp);
1279 			*vpp = NULL;
1280 			return (ENOENT);
1281 		}
1282 	}
1283 	if (error) {
1284 		*vpp = NULL;
1285 		goto retry;
1286 	}
1287 	if ((cnp->cn_flags & ISLASTCN) &&
1288 	    (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
1289 		ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
1290 	}
1291 	return (-1);
1292 
1293 unlock:
1294 	cache_lookup_unlock(blp, dvlp);
1295 	return (0);
1296 
1297 zap_and_exit:
1298 	if (blp != NULL)
1299 		error = cache_zap_rlocked_bucket(ncp, blp);
1300 	else
1301 		error = cache_zap_locked_vnode(ncp, dvp);
1302 	if (error != 0) {
1303 		zap_and_exit_bucket_fail++;
1304 		cache_maybe_yield();
1305 		goto retry;
1306 	}
1307 	cache_free(ncp);
1308 	return (0);
1309 }
1310 
1311 struct celockstate {
1312 	struct mtx *vlp[3];
1313 	struct rwlock *blp[2];
1314 };
1315 CTASSERT((nitems(((struct celockstate *)0)->vlp) == 3));
1316 CTASSERT((nitems(((struct celockstate *)0)->blp) == 2));
1317 
1318 static inline void
1319 cache_celockstate_init(struct celockstate *cel)
1320 {
1321 
1322 	bzero(cel, sizeof(*cel));
1323 }
1324 
1325 static void
1326 cache_lock_vnodes_cel(struct celockstate *cel, struct vnode *vp,
1327     struct vnode *dvp)
1328 {
1329 	struct mtx *vlp1, *vlp2;
1330 
1331 	MPASS(cel->vlp[0] == NULL);
1332 	MPASS(cel->vlp[1] == NULL);
1333 	MPASS(cel->vlp[2] == NULL);
1334 
1335 	MPASS(vp != NULL || dvp != NULL);
1336 
1337 	vlp1 = VP2VNODELOCK(vp);
1338 	vlp2 = VP2VNODELOCK(dvp);
1339 	cache_sort(&vlp1, &vlp2);
1340 
1341 	if (vlp1 != NULL) {
1342 		mtx_lock(vlp1);
1343 		cel->vlp[0] = vlp1;
1344 	}
1345 	mtx_lock(vlp2);
1346 	cel->vlp[1] = vlp2;
1347 }
1348 
1349 static void
1350 cache_unlock_vnodes_cel(struct celockstate *cel)
1351 {
1352 
1353 	MPASS(cel->vlp[0] != NULL || cel->vlp[1] != NULL);
1354 
1355 	if (cel->vlp[0] != NULL)
1356 		mtx_unlock(cel->vlp[0]);
1357 	if (cel->vlp[1] != NULL)
1358 		mtx_unlock(cel->vlp[1]);
1359 	if (cel->vlp[2] != NULL)
1360 		mtx_unlock(cel->vlp[2]);
1361 }
1362 
1363 static bool
1364 cache_lock_vnodes_cel_3(struct celockstate *cel, struct vnode *vp)
1365 {
1366 	struct mtx *vlp;
1367 	bool ret;
1368 
1369 	cache_assert_vlp_locked(cel->vlp[0]);
1370 	cache_assert_vlp_locked(cel->vlp[1]);
1371 	MPASS(cel->vlp[2] == NULL);
1372 
1373 	MPASS(vp != NULL);
1374 	vlp = VP2VNODELOCK(vp);
1375 
1376 	ret = true;
1377 	if (vlp >= cel->vlp[1]) {
1378 		mtx_lock(vlp);
1379 	} else {
1380 		if (mtx_trylock(vlp))
1381 			goto out;
1382 		cache_lock_vnodes_cel_3_failures++;
1383 		cache_unlock_vnodes_cel(cel);
1384 		if (vlp < cel->vlp[0]) {
1385 			mtx_lock(vlp);
1386 			mtx_lock(cel->vlp[0]);
1387 			mtx_lock(cel->vlp[1]);
1388 		} else {
1389 			if (cel->vlp[0] != NULL)
1390 				mtx_lock(cel->vlp[0]);
1391 			mtx_lock(vlp);
1392 			mtx_lock(cel->vlp[1]);
1393 		}
1394 		ret = false;
1395 	}
1396 out:
1397 	cel->vlp[2] = vlp;
1398 	return (ret);
1399 }
1400 
1401 static void
1402 cache_lock_buckets_cel(struct celockstate *cel, struct rwlock *blp1,
1403     struct rwlock *blp2)
1404 {
1405 
1406 	MPASS(cel->blp[0] == NULL);
1407 	MPASS(cel->blp[1] == NULL);
1408 
1409 	cache_sort(&blp1, &blp2);
1410 
1411 	if (blp1 != NULL) {
1412 		rw_wlock(blp1);
1413 		cel->blp[0] = blp1;
1414 	}
1415 	rw_wlock(blp2);
1416 	cel->blp[1] = blp2;
1417 }
1418 
1419 static void
1420 cache_unlock_buckets_cel(struct celockstate *cel)
1421 {
1422 
1423 	if (cel->blp[0] != NULL)
1424 		rw_wunlock(cel->blp[0]);
1425 	rw_wunlock(cel->blp[1]);
1426 }
1427 
1428 /*
1429  * Lock part of the cache affected by the insertion.
1430  *
1431  * This means vnodelocks for dvp, vp and the relevant bucketlock.
1432  * However, insertion can result in removal of an old entry. In this
1433  * case we have an additional vnode and bucketlock pair to lock. If the
1434  * entry is negative, ncelock is locked instead of the vnode.
1435  *
1436  * That is, in the worst case we have to lock 3 vnodes and 2 bucketlocks, while
1437  * preserving the locking order (smaller address first).
1438  */
1439 static void
1440 cache_enter_lock(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
1441     uint32_t hash)
1442 {
1443 	struct namecache *ncp;
1444 	struct rwlock *blps[2];
1445 
1446 	blps[0] = HASH2BUCKETLOCK(hash);
1447 	for (;;) {
1448 		blps[1] = NULL;
1449 		cache_lock_vnodes_cel(cel, dvp, vp);
1450 		if (vp == NULL || vp->v_type != VDIR)
1451 			break;
1452 		ncp = vp->v_cache_dd;
1453 		if (ncp == NULL)
1454 			break;
1455 		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
1456 			break;
1457 		MPASS(ncp->nc_dvp == vp);
1458 		blps[1] = NCP2BUCKETLOCK(ncp);
1459 		if (ncp->nc_flag & NCF_NEGATIVE)
1460 			break;
1461 		if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
1462 			break;
1463 		/*
1464 		 * All vnodes got re-locked. Re-validate the state and if
1465 		 * nothing changed we are done. Otherwise restart.
1466 		 */
1467 		if (ncp == vp->v_cache_dd &&
1468 		    (ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
1469 		    blps[1] == NCP2BUCKETLOCK(ncp) &&
1470 		    VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
1471 			break;
1472 		cache_unlock_vnodes_cel(cel);
1473 		cel->vlp[0] = NULL;
1474 		cel->vlp[1] = NULL;
1475 		cel->vlp[2] = NULL;
1476 	}
1477 	cache_lock_buckets_cel(cel, blps[0], blps[1]);
1478 }
1479 
1480 static void
1481 cache_enter_lock_dd(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
1482     uint32_t hash)
1483 {
1484 	struct namecache *ncp;
1485 	struct rwlock *blps[2];
1486 
1487 	blps[0] = HASH2BUCKETLOCK(hash);
1488 	for (;;) {
1489 		blps[1] = NULL;
1490 		cache_lock_vnodes_cel(cel, dvp, vp);
1491 		ncp = dvp->v_cache_dd;
1492 		if (ncp == NULL)
1493 			break;
1494 		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
1495 			break;
1496 		MPASS(ncp->nc_dvp == dvp);
1497 		blps[1] = NCP2BUCKETLOCK(ncp);
1498 		if (ncp->nc_flag & NCF_NEGATIVE)
1499 			break;
1500 		if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
1501 			break;
1502 		if (ncp == dvp->v_cache_dd &&
1503 		    (ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
1504 		    blps[1] == NCP2BUCKETLOCK(ncp) &&
1505 		    VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
1506 			break;
1507 		cache_unlock_vnodes_cel(cel);
1508 		cel->vlp[0] = NULL;
1509 		cel->vlp[1] = NULL;
1510 		cel->vlp[2] = NULL;
1511 	}
1512 	cache_lock_buckets_cel(cel, blps[0], blps[1]);
1513 }
1514 
1515 static void
1516 cache_enter_unlock(struct celockstate *cel)
1517 {
1518 
1519 	cache_unlock_buckets_cel(cel);
1520 	cache_unlock_vnodes_cel(cel);
1521 }
1522 
1523 /*
1524  * Add an entry to the cache.
1525  */
1526 void
1527 cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
1528     struct timespec *tsp, struct timespec *dtsp)
1529 {
1530 	struct celockstate cel;
1531 	struct namecache *ncp, *n2, *ndd;
1532 	struct namecache_ts *n3;
1533 	struct nchashhead *ncpp;
1534 	struct neglist *neglist;
1535 	uint32_t hash;
1536 	int flag;
1537 	int len;
1538 	bool neg_locked;
1539 
1540 	CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
1541 	VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
1542 	    ("cache_enter: Adding a doomed vnode"));
1543 	VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
1544 	    ("cache_enter: Doomed vnode used as src"));
1545 
1546 	if (__predict_false(!doingcache))
1547 		return;
1548 
1549 	/*
1550 	 * Avoid blowout in namecache entries.
1551 	 */
1552 	if (__predict_false(numcache >= desiredvnodes * ncsizefactor))
1553 		return;
1554 
1555 	cache_celockstate_init(&cel);
1556 	ndd = NULL;
1557 	flag = 0;
1558 	if (cnp->cn_nameptr[0] == '.') {
1559 		if (cnp->cn_namelen == 1)
1560 			return;
1561 		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
1562 			len = cnp->cn_namelen;
1563 			hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
1564 			cache_enter_lock_dd(&cel, dvp, vp, hash);
1565 			/*
1566 			 * If the dotdot entry already exists, just retarget it
1567 			 * to the new parent vnode; otherwise continue with
1568 			 * allocating a new namecache entry.
1569 			 */
1570 			if ((ncp = dvp->v_cache_dd) != NULL &&
1571 			    ncp->nc_flag & NCF_ISDOTDOT) {
1572 				KASSERT(ncp->nc_dvp == dvp,
1573 				    ("wrong isdotdot parent"));
1574 				neg_locked = false;
1575 				if (ncp->nc_flag & NCF_NEGATIVE || vp == NULL) {
1576 					neglist = NCP2NEGLIST(ncp);
1577 					mtx_lock(&ncneg_hot.nl_lock);
1578 					mtx_lock(&neglist->nl_lock);
1579 					neg_locked = true;
1580 				}
1581 				if (!(ncp->nc_flag & NCF_NEGATIVE)) {
1582 					TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst,
1583 					    ncp, nc_dst);
1584 				} else {
1585 					cache_negative_remove(ncp, true);
1586 				}
1587 				if (vp != NULL) {
1588 					TAILQ_INSERT_HEAD(&vp->v_cache_dst,
1589 					    ncp, nc_dst);
1590 					ncp->nc_flag &= ~(NCF_NEGATIVE|NCF_HOTNEGATIVE);
1591 				} else {
1592 					ncp->nc_flag &= ~(NCF_HOTNEGATIVE);
1593 					ncp->nc_flag |= NCF_NEGATIVE;
1594 					cache_negative_insert(ncp, true);
1595 				}
1596 				if (neg_locked) {
1597 					mtx_unlock(&neglist->nl_lock);
1598 					mtx_unlock(&ncneg_hot.nl_lock);
1599 				}
1600 				ncp->nc_vp = vp;
1601 				cache_enter_unlock(&cel);
1602 				return;
1603 			}
1604 			dvp->v_cache_dd = NULL;
1605 			cache_enter_unlock(&cel);
1606 			cache_celockstate_init(&cel);
1607 			SDT_PROBE3(vfs, namecache, enter, done, dvp, "..", vp);
1608 			flag = NCF_ISDOTDOT;
1609 		}
1610 	}
1611 
1612 	/*
1613 	 * Calculate the hash key and setup as much of the new
1614 	 * namecache entry as possible before acquiring the lock.
1615 	 */
1616 	ncp = cache_alloc(cnp->cn_namelen, tsp != NULL);
1617 	ncp->nc_flag = flag;
1618 	ncp->nc_vp = vp;
1619 	if (vp == NULL)
1620 		ncp->nc_flag |= NCF_NEGATIVE;
1621 	ncp->nc_dvp = dvp;
1622 	if (tsp != NULL) {
1623 		n3 = (struct namecache_ts *)ncp;
1624 		n3->nc_time = *tsp;
1625 		n3->nc_ticks = ticks;
1626 		n3->nc_flag |= NCF_TS;
1627 		if (dtsp != NULL) {
1628 			n3->nc_dotdottime = *dtsp;
1629 			n3->nc_flag |= NCF_DTS;
1630 		}
1631 	}
1632 	len = ncp->nc_nlen = cnp->cn_namelen;
1633 	hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
1634 	strlcpy(nc_get_name(ncp), cnp->cn_nameptr, len + 1);
1635 	cache_enter_lock(&cel, dvp, vp, hash);
1636 
1637 	/*
1638 	 * See if this vnode or negative entry is already in the cache
1639 	 * with this name.  This can happen with concurrent lookups of
1640 	 * the same path name.
1641 	 */
1642 	ncpp = NCHHASH(hash);
1643 	LIST_FOREACH(n2, ncpp, nc_hash) {
1644 		if (n2->nc_dvp == dvp &&
1645 		    n2->nc_nlen == cnp->cn_namelen &&
1646 		    !bcmp(nc_get_name(n2), cnp->cn_nameptr, n2->nc_nlen)) {
1647 			if (tsp != NULL) {
1648 				KASSERT((n2->nc_flag & NCF_TS) != 0,
1649 				    ("no NCF_TS"));
1650 				n3 = (struct namecache_ts *)n2;
1651 				n3->nc_time =
1652 				    ((struct namecache_ts *)ncp)->nc_time;
1653 				n3->nc_ticks =
1654 				    ((struct namecache_ts *)ncp)->nc_ticks;
1655 				if (dtsp != NULL) {
1656 					n3->nc_dotdottime =
1657 					    ((struct namecache_ts *)ncp)->
1658 					    nc_dotdottime;
1659 					if (ncp->nc_flag & NCF_NEGATIVE)
1660 						mtx_lock(&ncneg_hot.nl_lock);
1661 					n3->nc_flag |= NCF_DTS;
1662 					if (ncp->nc_flag & NCF_NEGATIVE)
1663 						mtx_unlock(&ncneg_hot.nl_lock);
1664 				}
1665 			}
1666 			goto out_unlock_free;
1667 		}
1668 	}
1669 
1670 	if (flag == NCF_ISDOTDOT) {
1671 		/*
1672 		 * See if we are trying to add a .. entry, but some other lookup
1673 		 * has already populated the v_cache_dd pointer.
1674 		 */
1675 		if (dvp->v_cache_dd != NULL)
1676 			goto out_unlock_free;
1677 		KASSERT(vp == NULL || vp->v_type == VDIR,
1678 		    ("wrong vnode type %p", vp));
1679 		dvp->v_cache_dd = ncp;
1680 	}
1681 
1682 	atomic_add_rel_long(&numcache, 1);
1683 	if (vp != NULL) {
1684 		if (vp->v_type == VDIR) {
1685 			if (flag != NCF_ISDOTDOT) {
1686 				/*
1687 				 * For this case, the cache entry maps both the
1688 				 * directory name in it and the name ".." for the
1689 				 * directory's parent.
1690 				 */
1691 				if ((ndd = vp->v_cache_dd) != NULL) {
1692 					if ((ndd->nc_flag & NCF_ISDOTDOT) != 0)
1693 						cache_zap_locked(ndd, false);
1694 					else
1695 						ndd = NULL;
1696 				}
1697 				vp->v_cache_dd = ncp;
1698 			}
1699 		} else {
1700 			vp->v_cache_dd = NULL;
1701 		}
1702 	}
1703 
1704 	if (flag != NCF_ISDOTDOT) {
1705 		if (LIST_EMPTY(&dvp->v_cache_src)) {
1706 			vhold(dvp);
1707 			atomic_add_rel_long(&numcachehv, 1);
1708 		}
1709 		LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
1710 	}
1711 
1712 	/*
1713 	 * Insert the new namecache entry into the appropriate chain
1714 	 * within the cache entries table.
1715 	 */
1716 	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
1717 
1718 	/*
1719 	 * If the entry is "negative", we place it into the
1720 	 * "negative" cache queue, otherwise, we place it into the
1721 	 * destination vnode's cache entries queue.
1722 	 */
1723 	if (vp != NULL) {
1724 		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
1725 		SDT_PROBE3(vfs, namecache, enter, done, dvp, nc_get_name(ncp),
1726 		    vp);
1727 	} else {
1728 		if (cnp->cn_flags & ISWHITEOUT)
1729 			ncp->nc_flag |= NCF_WHITE;
1730 		cache_negative_insert(ncp, false);
1731 		SDT_PROBE2(vfs, namecache, enter_negative, done, dvp,
1732 		    nc_get_name(ncp));
1733 	}
1734 	cache_enter_unlock(&cel);
1735 	if (numneg * ncnegfactor > numcache)
1736 		cache_negative_zap_one();
1737 	cache_free(ndd);
1738 	return;
1739 out_unlock_free:
1740 	cache_enter_unlock(&cel);
1741 	cache_free(ncp);
1742 	return;
1743 }
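
/*
 * Filesystems normally reach this function through the cache_enter()
 * wrapper macro, which passes NULL timestamps.  A sketch of the usual
 * VOP_LOOKUP epilogue (assumed caller, with error and *vpp being the
 * results of the directory scan):
 *
 *	if (cnp->cn_flags & MAKEENTRY)
 *		cache_enter(dvp, error == 0 ? *vpp : NULL, cnp);
 *
 * where a NULL vp creates a negative entry recording that the name does
 * not exist.
 */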
1744 
1745 static u_int
1746 cache_roundup_2(u_int val)
1747 {
1748 	u_int res;
1749 
1750 	for (res = 1; res <= val; res <<= 1)
1751 		continue;
1752 
1753 	return (res);
1754 }
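
/*
 * E.g. cache_roundup_2(1000) == 1024 and cache_roundup_2(1024) == 2048:
 * the result is the smallest power of 2 strictly greater than val.  The
 * callers below subtract 1 to turn it into a power-of-2 hash mask.
 */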
1755 
1756 /*
1757  * Name cache initialization, from vfs_init() when we are booting
1758  */
1759 static void
1760 nchinit(void *dummy __unused)
1761 {
1762 	u_int i;
1763 
1764 	cache_zone_small = uma_zcreate("S VFS Cache",
1765 	    sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1,
1766 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
1767 	cache_zone_small_ts = uma_zcreate("STS VFS Cache",
1768 	    sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1,
1769 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
1770 	cache_zone_large = uma_zcreate("L VFS Cache",
1771 	    sizeof(struct namecache) + NAME_MAX + 1,
1772 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
1773 	cache_zone_large_ts = uma_zcreate("LTS VFS Cache",
1774 	    sizeof(struct namecache_ts) + NAME_MAX + 1,
1775 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
1776 
1777 	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
1778 	ncbuckethash = cache_roundup_2(mp_ncpus * 64) - 1;
1779 	if (ncbuckethash > nchash)
1780 		ncbuckethash = nchash;
1781 	bucketlocks = malloc(sizeof(*bucketlocks) * numbucketlocks, M_VFSCACHE,
1782 	    M_WAITOK | M_ZERO);
1783 	for (i = 0; i < numbucketlocks; i++)
1784 		rw_init_flags(&bucketlocks[i], "ncbuc", RW_DUPOK | RW_RECURSE);
1785 	ncvnodehash = cache_roundup_2(mp_ncpus * 64) - 1;
1786 	vnodelocks = malloc(sizeof(*vnodelocks) * numvnodelocks, M_VFSCACHE,
1787 	    M_WAITOK | M_ZERO);
1788 	for (i = 0; i < numvnodelocks; i++)
1789 		mtx_init(&vnodelocks[i], "ncvn", NULL, MTX_DUPOK | MTX_RECURSE);
1790 	ncpurgeminvnodes = numbucketlocks;
1791 
1792 	ncneghash = 3;
1793 	neglists = malloc(sizeof(*neglists) * numneglists, M_VFSCACHE,
1794 	    M_WAITOK | M_ZERO);
1795 	for (i = 0; i < numneglists; i++) {
1796 		mtx_init(&neglists[i].nl_lock, "ncnegl", NULL, MTX_DEF);
1797 		TAILQ_INIT(&neglists[i].nl_list);
1798 	}
1799 	mtx_init(&ncneg_hot.nl_lock, "ncneglh", NULL, MTX_DEF);
1800 	TAILQ_INIT(&ncneg_hot.nl_list);
1801 
1802 	mtx_init(&ncneg_shrink_lock, "ncnegs", NULL, MTX_DEF);
1803 
1804 	numcalls = counter_u64_alloc(M_WAITOK);
1805 	dothits = counter_u64_alloc(M_WAITOK);
1806 	dotdothits = counter_u64_alloc(M_WAITOK);
1807 	numchecks = counter_u64_alloc(M_WAITOK);
1808 	nummiss = counter_u64_alloc(M_WAITOK);
1809 	nummisszap = counter_u64_alloc(M_WAITOK);
1810 	numposzaps = counter_u64_alloc(M_WAITOK);
1811 	numposhits = counter_u64_alloc(M_WAITOK);
1812 	numnegzaps = counter_u64_alloc(M_WAITOK);
1813 	numneghits = counter_u64_alloc(M_WAITOK);
1814 	numfullpathcalls = counter_u64_alloc(M_WAITOK);
1815 	numfullpathfail1 = counter_u64_alloc(M_WAITOK);
1816 	numfullpathfail2 = counter_u64_alloc(M_WAITOK);
1817 	numfullpathfail4 = counter_u64_alloc(M_WAITOK);
1818 	numfullpathfound = counter_u64_alloc(M_WAITOK);
1819 }
1820 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);
1821 
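/*
 * Resize the name cache hash table to suit a new maxvnodes value.
 */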
1822 void
1823 cache_changesize(int newmaxvnodes)
1824 {
1825 	struct nchashhead *new_nchashtbl, *old_nchashtbl;
1826 	u_long new_nchash, old_nchash;
1827 	struct namecache *ncp;
1828 	uint32_t hash;
1829 	int i;
1830 
1831 	newmaxvnodes = cache_roundup_2(newmaxvnodes * 2);
1832 	if (newmaxvnodes < numbucketlocks)
1833 		newmaxvnodes = numbucketlocks;
1834 
1835 	new_nchashtbl = hashinit(newmaxvnodes, M_VFSCACHE, &new_nchash);
1836 	/* If same hash table size, nothing to do */
1837 	if (nchash == new_nchash) {
1838 		free(new_nchashtbl, M_VFSCACHE);
1839 		return;
1840 	}
1841 	/*
1842 	 * Move everything from the old hash table to the new table.
1843 	 * None of the namecache entries can be removed out from under us,
1844 	 * since removal requires the bucket lock and we hold them all.
1845 	 */
1846 	cache_lock_all_vnodes();
1847 	cache_lock_all_buckets();
1848 	old_nchashtbl = nchashtbl;
1849 	old_nchash = nchash;
1850 	nchashtbl = new_nchashtbl;
1851 	nchash = new_nchash;
1852 	for (i = 0; i <= old_nchash; i++) {
1853 		while ((ncp = LIST_FIRST(&old_nchashtbl[i])) != NULL) {
1854 			hash = cache_get_hash(nc_get_name(ncp), ncp->nc_nlen,
1855 			    ncp->nc_dvp);
1856 			LIST_REMOVE(ncp, nc_hash);
1857 			LIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash);
1858 		}
1859 	}
1860 	cache_unlock_all_buckets();
1861 	cache_unlock_all_vnodes();
1862 	free(old_nchashtbl, M_VFSCACHE);
1863 }
1864 
1865 /*
1866  * Invalidate all entries to a particular vnode.
1867  */
1868 void
1869 cache_purge(struct vnode *vp)
1870 {
1871 	TAILQ_HEAD(, namecache) ncps;
1872 	struct namecache *ncp, *nnp;
1873 	struct mtx *vlp, *vlp2;
1874 
1875 	CTR1(KTR_VFS, "cache_purge(%p)", vp);
1876 	SDT_PROBE1(vfs, namecache, purge, done, vp);
1877 	if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
1878 	    vp->v_cache_dd == NULL)
1879 		return;
1880 	TAILQ_INIT(&ncps);
1881 	vlp = VP2VNODELOCK(vp);
1882 	vlp2 = NULL;
1883 	mtx_lock(vlp);
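	/*
	 * cache_zap_locked_vnode_kl2() returns false when it could not
	 * zap the entry atomically (typically because it had to drop
	 * locks to acquire a second vnode lock), in which case the
	 * lists must be rescanned from the beginning.
	 */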
1884 retry:
1885 	while (!LIST_EMPTY(&vp->v_cache_src)) {
1886 		ncp = LIST_FIRST(&vp->v_cache_src);
1887 		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
1888 			goto retry;
1889 		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
1890 	}
1891 	while (!TAILQ_EMPTY(&vp->v_cache_dst)) {
1892 		ncp = TAILQ_FIRST(&vp->v_cache_dst);
1893 		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
1894 			goto retry;
1895 		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
1896 	}
1897 	ncp = vp->v_cache_dd;
1898 	if (ncp != NULL) {
1899 		KASSERT(ncp->nc_flag & NCF_ISDOTDOT,
1900 		    ("lost dotdot link"));
1901 		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
1902 			goto retry;
1903 		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
1904 	}
1905 	KASSERT(vp->v_cache_dd == NULL, ("incomplete purge"));
1906 	mtx_unlock(vlp);
1907 	if (vlp2 != NULL)
1908 		mtx_unlock(vlp2);
1909 	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
1910 		cache_free(ncp);
1911 	}
1912 }
1913 
1914 /*
1915  * Invalidate all negative entries for a particular directory vnode.
1916  */
1917 void
1918 cache_purge_negative(struct vnode *vp)
1919 {
1920 	TAILQ_HEAD(, namecache) ncps;
1921 	struct namecache *ncp, *nnp;
1922 	struct mtx *vlp;
1923 
1924 	CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
1925 	SDT_PROBE1(vfs, namecache, purge_negative, done, vp);
1926 	TAILQ_INIT(&ncps);
1927 	vlp = VP2VNODELOCK(vp);
1928 	mtx_lock(vlp);
1929 	LIST_FOREACH_SAFE(ncp, &vp->v_cache_src, nc_src, nnp) {
1930 		if (!(ncp->nc_flag & NCF_NEGATIVE))
1931 			continue;
1932 		cache_zap_negative_locked_vnode_kl(ncp, vp);
1933 		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
1934 	}
1935 	mtx_unlock(vlp);
1936 	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
1937 		cache_free(ncp);
1938 	}
1939 }
1940 
1941 /*
1942  * Flush all entries referencing a particular filesystem.
1943  */
1944 void
1945 cache_purgevfs(struct mount *mp, bool force)
1946 {
1947 	TAILQ_HEAD(, namecache) ncps;
1948 	struct mtx *vlp1, *vlp2;
1949 	struct rwlock *blp;
1950 	struct nchashhead *bucket;
1951 	struct namecache *ncp, *nnp;
1952 	u_long i, j, n_nchash;
1953 	int error;
1954 
1955 	/* Scan the hash table for entries belonging to mp */
1956 	SDT_PROBE1(vfs, namecache, purgevfs, done, mp);
1957 	if (!force && mp->mnt_nvnodelistsize <= ncpurgeminvnodes)
1958 		return;
1959 	TAILQ_INIT(&ncps);
1960 	n_nchash = nchash + 1;
1961 	vlp1 = vlp2 = NULL;
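	/*
	 * Hash chain j is protected by bucket lock j % numbucketlocks,
	 * so with lock i held every chain congruent to i can be swept.
	 */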
1962 	for (i = 0; i < numbucketlocks; i++) {
1963 		blp = (struct rwlock *)&bucketlocks[i];
1964 		rw_wlock(blp);
1965 		for (j = i; j < n_nchash; j += numbucketlocks) {
1966 retry:
1967 			bucket = &nchashtbl[j];
1968 			LIST_FOREACH_SAFE(ncp, bucket, nc_hash, nnp) {
1969 				cache_assert_bucket_locked(ncp, RA_WLOCKED);
1970 				if (ncp->nc_dvp->v_mount != mp)
1971 					continue;
1972 				error = cache_zap_wlocked_bucket_kl(ncp, blp,
1973 				    &vlp1, &vlp2);
1974 				if (error != 0)
1975 					goto retry;
1976 				TAILQ_INSERT_HEAD(&ncps, ncp, nc_dst);
1977 			}
1978 		}
1979 		rw_wunlock(blp);
1980 		if (vlp1 == NULL && vlp2 == NULL)
1981 			cache_maybe_yield();
1982 	}
1983 	if (vlp1 != NULL)
1984 		mtx_unlock(vlp1);
1985 	if (vlp2 != NULL)
1986 		mtx_unlock(vlp2);
1987 
1988 	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
1989 		cache_free(ncp);
1990 	}
1991 }
1992 
1993 /*
1994  * Perform canonical checks and a cache lookup, then pass the lookup on
1995  * to the filesystem through VOP_CACHEDLOOKUP() only if needed.
1996  */
1997 
1998 int
1999 vfs_cache_lookup(struct vop_lookup_args *ap)
2000 {
2001 	struct vnode *dvp;
2002 	int error;
2003 	struct vnode **vpp = ap->a_vpp;
2004 	struct componentname *cnp = ap->a_cnp;
2005 	struct ucred *cred = cnp->cn_cred;
2006 	int flags = cnp->cn_flags;
2007 	struct thread *td = cnp->cn_thread;
2008 
2009 	*vpp = NULL;
2010 	dvp = ap->a_dvp;
2011 
2012 	if (dvp->v_type != VDIR)
2013 		return (ENOTDIR);
2014 
2015 	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
2016 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
2017 		return (EROFS);
2018 
2019 	error = VOP_ACCESS(dvp, VEXEC, cred, td);
2020 	if (error)
2021 		return (error);
2022 
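	/*
	 * cache_lookup() returns -1 on a positive hit (with *vpp set),
	 * 0 on a miss and an errno such as ENOENT on a negative hit.
	 */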
2023 	error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
2024 	if (error == 0)
2025 		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
2026 	if (error == -1)
2027 		return (0);
2028 	return (error);
2029 }
2030 
2031 /*
2032  * XXX All of these sysctls would probably be more productive dead.
2033  */
2034 static int __read_mostly disablecwd;
2035 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
2036     "Disable the getcwd syscall");
2037 
2038 /* Implementation of the getcwd syscall. */
2039 int
2040 sys___getcwd(struct thread *td, struct __getcwd_args *uap)
2041 {
2042 
2043 	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen,
2044 	    MAXPATHLEN));
2045 }
2046 
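/*
 * Common getcwd() backend: resolve the current directory into a
 * temporary buffer with vn_fullpath1() and copy the result out to a
 * kernel or user destination, as selected by bufseg.
 */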
2047 int
2048 kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, u_int buflen,
2049     u_int path_max)
2050 {
2051 	char *bp, *tmpbuf;
2052 	struct filedesc *fdp;
2053 	struct vnode *cdir, *rdir;
2054 	int error;
2055 
2056 	if (__predict_false(disablecwd))
2057 		return (ENODEV);
2058 	if (__predict_false(buflen < 2))
2059 		return (EINVAL);
2060 	if (buflen > path_max)
2061 		buflen = path_max;
2062 
2063 	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
2064 	fdp = td->td_proc->p_fd;
2065 	FILEDESC_SLOCK(fdp);
2066 	cdir = fdp->fd_cdir;
2067 	vrefact(cdir);
2068 	rdir = fdp->fd_rdir;
2069 	vrefact(rdir);
2070 	FILEDESC_SUNLOCK(fdp);
2071 	error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen);
2072 	vrele(rdir);
2073 	vrele(cdir);
2074 
2075 	if (!error) {
2076 		if (bufseg == UIO_SYSSPACE)
2077 			bcopy(bp, buf, strlen(bp) + 1);
2078 		else
2079 			error = copyout(bp, buf, strlen(bp) + 1);
2080 #ifdef KTRACE
2081 		if (KTRPOINT(curthread, KTR_NAMEI))
2082 			ktrnamei(bp);
2083 #endif
2084 	}
2085 	free(tmpbuf, M_TEMP);
2086 	return (error);
2087 }
2088 
2089 /*
2090  * Thus begins the fullpath magic.
2091  */
2092 
2093 static int __read_mostly disablefullpath;
2094 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
2095     "Disable the vn_fullpath function");
2096 
2097 /*
2098  * Retrieve the full filesystem path that corresponds to a vnode from the
2099  * name cache (if available).
2100  */
2101 int
2102 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
2103 {
2104 	char *buf;
2105 	struct filedesc *fdp;
2106 	struct vnode *rdir;
2107 	int error;
2108 
2109 	if (__predict_false(disablefullpath))
2110 		return (ENODEV);
2111 	if (__predict_false(vn == NULL))
2112 		return (EINVAL);
2113 
2114 	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
2115 	fdp = td->td_proc->p_fd;
2116 	FILEDESC_SLOCK(fdp);
2117 	rdir = fdp->fd_rdir;
2118 	vrefact(rdir);
2119 	FILEDESC_SUNLOCK(fdp);
2120 	error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN);
2121 	vrele(rdir);
2122 
2123 	if (!error)
2124 		*freebuf = buf;
2125 	else
2126 		free(buf, M_TEMP);
2127 	return (error);
2128 }
2129 
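/*
 * Illustrative use of vn_fullpath() by a hypothetical caller; note that
 * *freebuf, not *retbuf, is what must be passed to free():
 *
 *	char *fullpath, *freebuf;
 *
 *	if (vn_fullpath(curthread, vp, &fullpath, &freebuf) == 0) {
 *		printf("%s\n", fullpath);
 *		free(freebuf, M_TEMP);
 *	}
 */
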
2130 /*
2131  * This function is similar to vn_fullpath, but it attempts to look up the
2132  * pathname relative to the global root mount point.  This is required for the
2133  * auditing sub-system, as audited pathnames must be absolute, relative to the
2134  * global root mount point.
2135  */
2136 int
2137 vn_fullpath_global(struct thread *td, struct vnode *vn,
2138     char **retbuf, char **freebuf)
2139 {
2140 	char *buf;
2141 	int error;
2142 
2143 	if (__predict_false(disablefullpath))
2144 		return (ENODEV);
2145 	if (__predict_false(vn == NULL))
2146 		return (EINVAL);
2147 	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
2148 	error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
2149 	if (!error)
2150 		*freebuf = buf;
2151 	else
2152 		free(buf, M_TEMP);
2153 	return (error);
2154 }
2155 
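/*
 * Translate a vnode into its name in the parent directory.  On success
 * the name is copied to the tail of buf (*buflen is decremented to the
 * new offset) and *vp is replaced with a referenced parent vnode.  The
 * caller's reference on the original *vp is consumed on all paths.
 */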
2156 int
2157 vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)
2158 {
2159 	struct vnode *dvp;
2160 	struct namecache *ncp;
2161 	struct mtx *vlp;
2162 	int error;
2163 
2164 	vlp = VP2VNODELOCK(*vp);
2165 	mtx_lock(vlp);
2166 	TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) {
2167 		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
2168 			break;
2169 	}
2170 	if (ncp != NULL) {
2171 		if (*buflen < ncp->nc_nlen) {
2172 			mtx_unlock(vlp);
2173 			vrele(*vp);
2174 			counter_u64_add(numfullpathfail4, 1);
2175 			error = ENOMEM;
2176 			SDT_PROBE3(vfs, namecache, fullpath, return, error,
2177 			    vp, NULL);
2178 			return (error);
2179 		}
2180 		*buflen -= ncp->nc_nlen;
2181 		memcpy(buf + *buflen, nc_get_name(ncp), ncp->nc_nlen);
2182 		SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp,
2183 		    nc_get_name(ncp), vp);
2184 		dvp = *vp;
2185 		*vp = ncp->nc_dvp;
2186 		vref(*vp);
2187 		mtx_unlock(vlp);
2188 		vrele(dvp);
2189 		return (0);
2190 	}
2191 	SDT_PROBE1(vfs, namecache, fullpath, miss, vp);
2192 
2193 	mtx_unlock(vlp);
2194 	vn_lock(*vp, LK_SHARED | LK_RETRY);
2195 	error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
2196 	vput(*vp);
2197 	if (error) {
2198 		counter_u64_add(numfullpathfail2, 1);
2199 		SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
2200 		return (error);
2201 	}
2202 
2203 	*vp = dvp;
2204 	if (dvp->v_iflag & VI_DOOMED) {
2205 		/* forced unmount */
2206 		vrele(dvp);
2207 		error = ENOENT;
2208 		SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
2209 		return (error);
2210 	}
2211 	/*
2212 	 * *vp is returned with its use count still incremented.
2213 	 */
2214 
2215 	return (0);
2216 }
2217 
2218 /*
2219  * The magic behind kern___getcwd() and vn_fullpath().
2220  */
2221 static int
2222 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
2223     char *buf, char **retbuf, u_int buflen)
2224 {
2225 	int error, slash_prefixed;
2226 #ifdef KDTRACE_HOOKS
2227 	struct vnode *startvp = vp;
2228 #endif
2229 	struct vnode *vp1;
2230 
2231 	buflen--;
2232 	buf[buflen] = '\0';
2233 	error = 0;
2234 	slash_prefixed = 0;
2235 
2236 	SDT_PROBE1(vfs, namecache, fullpath, entry, vp);
2237 	counter_u64_add(numfullpathcalls, 1);
2238 	vref(vp);
2239 	if (vp->v_type != VDIR) {
2240 		error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
2241 		if (error)
2242 			return (error);
2243 		if (buflen == 0) {
2244 			vrele(vp);
2245 			return (ENOMEM);
2246 		}
2247 		buf[--buflen] = '/';
2248 		slash_prefixed = 1;
2249 	}
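	/*
	 * Walk up towards the root, prepending one name component per
	 * iteration.  Mount points are crossed by stepping from a
	 * filesystem root vnode to the vnode covered by its mount.
	 */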
2250 	while (vp != rdir && vp != rootvnode) {
2251 		/*
2252 		 * The vp vnode must be already fully constructed,
2253 		 * since it is either found in namecache or obtained
2254 		 * from VOP_VPTOCNP().  We may test for VV_ROOT safely
2255 		 * without obtaining the vnode lock.
2256 		 */
2257 		if ((vp->v_vflag & VV_ROOT) != 0) {
2258 			vn_lock(vp, LK_RETRY | LK_SHARED);
2259 
2260 			/*
2261 			 * With the vnode locked, check for races with
2262 			 * unmount, forced or not.  Note that we
2263 			 * already verified that vp is not equal to
2264 			 * the root vnode, which means that
2265 			 * mnt_vnodecovered can be NULL only for the
2266 			 * case of unmount.
2267 			 */
2268 			if ((vp->v_iflag & VI_DOOMED) != 0 ||
2269 			    (vp1 = vp->v_mount->mnt_vnodecovered) == NULL ||
2270 			    vp1->v_mountedhere != vp->v_mount) {
2271 				vput(vp);
2272 				error = ENOENT;
2273 				SDT_PROBE3(vfs, namecache, fullpath, return,
2274 				    error, vp, NULL);
2275 				break;
2276 			}
2277 
2278 			vref(vp1);
2279 			vput(vp);
2280 			vp = vp1;
2281 			continue;
2282 		}
2283 		if (vp->v_type != VDIR) {
2284 			vrele(vp);
2285 			counter_u64_add(numfullpathfail1, 1);
2286 			error = ENOTDIR;
2287 			SDT_PROBE3(vfs, namecache, fullpath, return,
2288 			    error, vp, NULL);
2289 			break;
2290 		}
2291 		error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
2292 		if (error)
2293 			break;
2294 		if (buflen == 0) {
2295 			vrele(vp);
2296 			error = ENOMEM;
2297 			SDT_PROBE3(vfs, namecache, fullpath, return, error,
2298 			    startvp, NULL);
2299 			break;
2300 		}
2301 		buf[--buflen] = '/';
2302 		slash_prefixed = 1;
2303 	}
2304 	if (error)
2305 		return (error);
2306 	if (!slash_prefixed) {
2307 		if (buflen == 0) {
2308 			vrele(vp);
2309 			counter_u64_add(numfullpathfail4, 1);
2310 			SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM,
2311 			    startvp, NULL);
2312 			return (ENOMEM);
2313 		}
2314 		buf[--buflen] = '/';
2315 	}
2316 	counter_u64_add(numfullpathfound, 1);
2317 	vrele(vp);
2318 
2319 	SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, buf + buflen);
2320 	*retbuf = buf + buflen;
2321 	return (0);
2322 }
2323 
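/*
 * Find the parent of the directory vp via the name cache: any regular
 * (non-dotdot) entry naming vp identifies the parent.  Returns a
 * referenced, shared-locked parent vnode, or NULL if no usable entry
 * is cached or the parent cannot be locked without sleeping.
 */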
2324 struct vnode *
2325 vn_dir_dd_ino(struct vnode *vp)
2326 {
2327 	struct namecache *ncp;
2328 	struct vnode *ddvp;
2329 	struct mtx *vlp;
2330 
2331 	ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino");
2332 	vlp = VP2VNODELOCK(vp);
2333 	mtx_lock(vlp);
2334 	TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) {
2335 		if ((ncp->nc_flag & NCF_ISDOTDOT) != 0)
2336 			continue;
2337 		ddvp = ncp->nc_dvp;
2338 		vhold(ddvp);
2339 		mtx_unlock(vlp);
2340 		if (vget(ddvp, LK_SHARED | LK_NOWAIT | LK_VNHELD, curthread))
2341 			return (NULL);
2342 		return (ddvp);
2343 	}
2344 	mtx_unlock(vlp);
2345 	return (NULL);
2346 }
2347 
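/*
 * Copy the last component name of vp, as recorded in the name cache,
 * into buf; the result is NUL-terminated and truncated to buflen - 1
 * characters, so buflen must be at least 1.
 */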
2348 int
2349 vn_commname(struct vnode *vp, char *buf, u_int buflen)
2350 {
2351 	struct namecache *ncp;
2352 	struct mtx *vlp;
2353 	int l;
2354 
2355 	vlp = VP2VNODELOCK(vp);
2356 	mtx_lock(vlp);
2357 	TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst)
2358 		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
2359 			break;
2360 	if (ncp == NULL) {
2361 		mtx_unlock(vlp);
2362 		return (ENOENT);
2363 	}
2364 	l = min(ncp->nc_nlen, buflen - 1);
2365 	memcpy(buf, nc_get_name(ncp), l);
2366 	mtx_unlock(vlp);
2367 	buf[l] = '\0';
2368 	return (0);
2369 }
2370 
2371 /* ABI compat shims for old kernel modules. */
2372 #undef cache_enter
2373 
2374 void	cache_enter(struct vnode *dvp, struct vnode *vp,
2375 	    struct componentname *cnp);
2376 
2377 void
2378 cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2379 {
2380 
2381 	cache_enter_time(dvp, vp, cnp, NULL, NULL);
2382 }
2383 
2384 /*
2385  * This function updates the path string to the vnode's full global path
2386  * and checks that the new path string fits in the pathlen-sized buffer.
2387  *
2388  * Requires a locked, referenced vnode.
2389  * The vnode is re-locked on success, left locked on ENODEV, unlocked otherwise.
2390  *
2391  * If the sysctl debug.disablefullpath is set, ENODEV is returned and the
2392  * vnode is left locked with the path untouched.
2393  *
2394  * If vp is a directory, the call to vn_fullpath_global() always succeeds
2395  * because it falls back to the ".." lookup if the namecache lookup fails.
2396  */
2397 int
2398 vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path,
2399     u_int pathlen)
2400 {
2401 	struct nameidata nd;
2402 	struct vnode *vp1;
2403 	char *rpath, *fbuf;
2404 	int error;
2405 
2406 	ASSERT_VOP_ELOCKED(vp, __func__);
2407 
2408 	/* Return ENODEV if sysctl debug.disablefullpath==1 */
2409 	if (__predict_false(disablefullpath))
2410 		return (ENODEV);
2411 
2412 	/* Construct global filesystem path from vp. */
2413 	VOP_UNLOCK(vp, 0);
2414 	error = vn_fullpath_global(td, vp, &rpath, &fbuf);
2415 
2416 	if (error != 0) {
2417 		vrele(vp);
2418 		return (error);
2419 	}
2420 
2421 	if (strlen(rpath) >= pathlen) {
2422 		vrele(vp);
2423 		error = ENAMETOOLONG;
2424 		goto out;
2425 	}
2426 
2427 	/*
2428 	 * Re-lookup the vnode by path to detect a possible rename.
2429 	 * As a side effect, the vnode is relocked.
2430 	 * If vnode was renamed, return ENOENT.
2431 	 */
2432 	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
2433 	    UIO_SYSSPACE, path, td);
2434 	error = namei(&nd);
2435 	if (error != 0) {
2436 		vrele(vp);
2437 		goto out;
2438 	}
2439 	NDFREE(&nd, NDF_ONLY_PNBUF);
2440 	vp1 = nd.ni_vp;
2441 	vrele(vp);
2442 	if (vp1 == vp)
2443 		strcpy(path, rpath);
2444 	else {
2445 		vput(vp1);
2446 		error = ENOENT;
2447 	}
2448 
2449 out:
2450 	free(fbuf, M_TEMP);
2451 	return (error);
2452 }
2453