xref: /freebsd/sys/kern/vfs_cache.c (revision 21a4258d89a4e27632cfd87e5ad6e8538a6e77a2)
/*-
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/uma.h>

SDT_PROVIDER_DECLARE(vfs);
SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *",
    "char *", "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int",
    "struct vnode *", "char *");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative,
    "struct vnode *", "char *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *");
SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *",
    "char *");

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct	namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct	vnode *nc_dvp;		/* vnode of parent of name */
	struct	vnode *nc_vp;		/* vnode the name refers to */
	u_char	nc_flag;		/* flag bits */
	u_char	nc_nlen;		/* length of name */
	char	nc_name[0];		/* segment name + nul */
};

/*
 * struct namecache_ts repeats struct namecache layout up to the
 * nc_nlen member.
 * struct namecache_ts is used in place of struct namecache when time(s) need
 * to be stored.  The nc_dotdottime field is used when a cache entry is mapping
 * both a non-dotdot directory name plus dotdot for the directory's
 * parent.
 */
struct	namecache_ts {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct	vnode *nc_dvp;		/* vnode of parent of name */
	struct	vnode *nc_vp;		/* vnode the name refers to */
	u_char	nc_flag;		/* flag bits */
	u_char	nc_nlen;		/* length of name */
	struct	timespec nc_time;	/* timespec provided by fs */
	struct	timespec nc_dotdottime;	/* dotdot timespec provided by fs */
	int	nc_ticks;		/* ticks value when entry was added */
	char	nc_name[0];		/* segment name + nul */
};

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	0x01
#define NCF_ISDOTDOT	0x02
#define	NCF_TS		0x04
#define	NCF_DTS		0x08
#define	NCF_DVDROP	0x10

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (vp, name) where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 *
 * These locks are used (in the order in which they can be taken):
 * NAME		TYPE	ROLE
 * vnodelock	mtx	vnode lists and v_cache_dd field protection
 * bucketlock	rwlock	for access to given set of hash buckets
 * ncneg_mtx	mtx	negative entry LRU management
 *
 * Additionally, ncneg_shrink_lock mtx is used to have at most one thread
 * shrinking the LRU list.
 *
 * It is legal to take multiple vnodelock and bucketlock locks. The locking
 * order is lower address first. Both are recursive.
 *
 * "." lookups are lockless.
 *
 * ".." and vnode -> name lookups require vnodelock.
 *
 * name -> vnode lookup requires the relevant bucketlock to be held for reading.
 *
 * Insertions and removals of entries require involved vnodes and bucketlocks
 * to be write-locked to prevent other threads from seeing the entry.
 *
 * Some lookups result in removal of the found entry (e.g. getting rid of a
 * negative entry with the intent to create a positive one), which poses a
 * problem when multiple threads reach the same state at the same time.
 * Similarly, two different threads can purge two different vnodes and try to
 * remove the same name.
 *
 * If the already held vnode lock is lower than the second required lock, we
 * can just take the other lock. However, in the opposite case, this could
 * deadlock. As such, this is resolved by trylocking and, if that fails,
 * unlocking the first lock, locking everything in order and revalidating the
 * state.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash) \
	(&nchashtbl[(hash) & nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;	/* Negative entry LRU list */
static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
    "Size of namecache hash table");
static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
    "Ratio of negative namecache entries");
static u_long	numneg;			/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
    "Number of negative entries in namecache");
static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
    "Number of namecache entries");
static u_long	numcachehv;		/* number of cache entries with vnodes held */
SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0,
    "Number of namecache entries with vnodes held");
u_int	ncsizefactor = 2;
SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0,
    "Size factor for namecache");
static u_int	ncpurgeminvnodes;
SYSCTL_UINT(_vfs, OID_AUTO, ncpurgeminvnodes, CTLFLAG_RW, &ncpurgeminvnodes, 0,
    "Number of vnodes below which purgevfs ignores the request");

struct nchstats	nchstats;		/* cache effectiveness statistics */

static struct mtx       ncneg_shrink_lock;
MTX_SYSINIT(vfscache_shrink_neg, &ncneg_shrink_lock, "Name Cache shrink neg",
    MTX_DEF);

static struct mtx_padalign ncneg_mtx;
MTX_SYSINIT(vfscache_neg, &ncneg_mtx, "ncneg", MTX_DEF);

static u_int   numbucketlocks;
static struct rwlock_padalign  *bucketlocks;
#define	HASH2BUCKETLOCK(hash) \
	((struct rwlock *)(&bucketlocks[((hash) % numbucketlocks)]))

static u_int   numvnodelocks;
static struct mtx *vnodelocks;
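/*
 * Map a vnode to the mtx protecting its namecache lists (hashed by the
 * vnode address).  Negative entries have no vnode, hence the NULL return
 * for a NULL argument.
 */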
static inline struct mtx *
VP2VNODELOCK(struct vnode *vp)
{
	struct mtx *vlp;

	if (vp == NULL)
		return (NULL);
	vlp = &vnodelocks[(((uintptr_t)(vp) >> 8) % numvnodelocks)];
	return (vlp);
}

/*
 * UMA zones for the VFS cache.
 *
 * The small cache is used for entries with short names, which are the
 * most common.  The large cache is used for entries which are too big to
 * fit in the small cache.
 */
static uma_zone_t cache_zone_small;
static uma_zone_t cache_zone_small_ts;
static uma_zone_t cache_zone_large;
static uma_zone_t cache_zone_large_ts;

#define	CACHE_PATH_CUTOFF	35

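/*
 * Allocate an entry from the zone matching the name length and, if requested,
 * with room for timestamps.
 */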
static struct namecache *
cache_alloc(int len, int ts)
{

	if (len > CACHE_PATH_CUTOFF) {
		if (ts)
			return (uma_zalloc(cache_zone_large_ts, M_WAITOK));
		else
			return (uma_zalloc(cache_zone_large, M_WAITOK));
	}
	if (ts)
		return (uma_zalloc(cache_zone_small_ts, M_WAITOK));
	else
		return (uma_zalloc(cache_zone_small, M_WAITOK));
}

static void
cache_free(struct namecache *ncp)
{
	int ts;

	if (ncp == NULL)
		return;
	ts = ncp->nc_flag & NCF_TS;
	if ((ncp->nc_flag & NCF_DVDROP) != 0)
		vdrop(ncp->nc_dvp);
	if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) {
		if (ts)
			uma_zfree(cache_zone_small_ts, ncp);
		else
			uma_zfree(cache_zone_small, ncp);
	} else if (ts)
		uma_zfree(cache_zone_large_ts, ncp);
	else
		uma_zfree(cache_zone_large, ncp);
}

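/*
 * nc_name sits at a different offset in struct namecache_ts, so the NCF_TS
 * flag has to be checked before the name can be read.
 */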
static char *
nc_get_name(struct namecache *ncp)
{
	struct namecache_ts *ncp_ts;

	if ((ncp->nc_flag & NCF_TS) == 0)
		return (ncp->nc_name);
	ncp_ts = (struct namecache_ts *)ncp;
	return (ncp_ts->nc_name);
}

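/*
 * Copy out the timestamp information requested by the caller.  Entries
 * lacking NCF_TS must not be asked for timestamps.
 */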
static void
cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp)
{

	KASSERT((ncp->nc_flag & NCF_TS) != 0 ||
	    (tsp == NULL && ticksp == NULL),
	    ("No NCF_TS"));

	if (tsp != NULL)
		*tsp = ((struct namecache_ts *)ncp)->nc_time;
	if (ticksp != NULL)
		*ticksp = ((struct namecache_ts *)ncp)->nc_ticks;
}

static int	doingcache = 1;		/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0,
    "VFS namecache enabled");

/* Export size information to userland */
SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR,
    sizeof(struct namecache), "sizeof(struct namecache)");

/*
 * The new name cache statistics
 */
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0,
    "Name cache statistics");
#define STATNODE_ULONG(name, descr)	\
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr);
#define STATNODE_COUNTER(name, descr)	\
	static counter_u64_t name;	\
	SYSCTL_COUNTER_U64(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, descr);
STATNODE_ULONG(numneg, "Number of negative cache entries");
STATNODE_ULONG(numcache, "Number of cache entries");
STATNODE_COUNTER(numcalls, "Number of cache lookups");
STATNODE_COUNTER(dothits, "Number of '.' hits");
STATNODE_COUNTER(dotdothits, "Number of '..' hits");
STATNODE_COUNTER(numchecks, "Number of checks in lookup");
STATNODE_COUNTER(nummiss, "Number of cache misses");
STATNODE_COUNTER(nummisszap, "Number of cache misses we do not want to cache");
STATNODE_COUNTER(numposzaps,
    "Number of cache hits (positive) we do not want to cache");
STATNODE_COUNTER(numposhits, "Number of cache hits (positive)");
STATNODE_COUNTER(numnegzaps,
    "Number of cache hits (negative) we do not want to cache");
STATNODE_COUNTER(numneghits, "Number of cache hits (negative)");
/* These count for kern___getcwd(), too. */
STATNODE_COUNTER(numfullpathcalls, "Number of fullpath search calls");
STATNODE_COUNTER(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)");
STATNODE_COUNTER(numfullpathfail2,
    "Number of fullpath search errors (VOP_VPTOCNP failures)");
STATNODE_COUNTER(numfullpathfail4, "Number of fullpath search errors (ENOMEM)");
STATNODE_COUNTER(numfullpathfound, "Number of successful fullpath calls");
static long zap_and_exit_bucket_fail;
STATNODE_ULONG(zap_and_exit_bucket_fail,
    "Number of times zap_and_exit failed to lock");
static long cache_lock_vnodes_cel_3_failures;
STATNODE_ULONG(cache_lock_vnodes_cel_3_failures,
    "Number of times 3-way vnode locking failed");

static void cache_zap_locked(struct namecache *ncp, bool neg_locked);
static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen);

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static int cache_yield;
SYSCTL_INT(_vfs_cache, OID_AUTO, yield, CTLFLAG_RD, &cache_yield, 0,
    "Number of times cache called yield");

static void
cache_maybe_yield(void)
{

	if (should_yield()) {
		cache_yield++;
		kern_yield(PRI_USER);
	}
}

static inline void
cache_assert_vlp_locked(struct mtx *vlp)
{

	if (vlp != NULL)
		mtx_assert(vlp, MA_OWNED);
}

static inline void
cache_assert_vnode_locked(struct vnode *vp)
{
	struct mtx *vlp;

	vlp = VP2VNODELOCK(vp);
	cache_assert_vlp_locked(vlp);
}

static uint32_t
cache_get_hash(char *name, u_char len, struct vnode *dvp)
{
	uint32_t hash;

	hash = fnv_32_buf(name, len, FNV1_32_INIT);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	return (hash);
}

static inline struct rwlock *
NCP2BUCKETLOCK(struct namecache *ncp)
{
	uint32_t hash;

	hash = cache_get_hash(nc_get_name(ncp), ncp->nc_nlen, ncp->nc_dvp);
	return (HASH2BUCKETLOCK(hash));
}

#ifdef INVARIANTS
static void
cache_assert_bucket_locked(struct namecache *ncp, int mode)
{
	struct rwlock *blp;

	blp = NCP2BUCKETLOCK(ncp);
	rw_assert(blp, mode);
}
#else
#define cache_assert_bucket_locked(x, y) do { } while (0)
#endif

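/*
 * Order two lock pointers by address so that locks are always taken in the
 * "lower address first" order described in the locking notes above.
 */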
#define cache_sort(x, y)	_cache_sort((void **)(x), (void **)(y))
static void
_cache_sort(void **p1, void **p2)
{
	void *tmp;

	if (*p1 > *p2) {
		tmp = *p2;
		*p2 = *p1;
		*p1 = tmp;
	}
}

static void
cache_lock_all_buckets(void)
{
	u_int i;

	for (i = 0; i < numbucketlocks; i++)
		rw_wlock(&bucketlocks[i]);
}

static void
cache_unlock_all_buckets(void)
{
	u_int i;

	for (i = 0; i < numbucketlocks; i++)
		rw_wunlock(&bucketlocks[i]);
}

static void
cache_lock_all_vnodes(void)
{
	u_int i;

	for (i = 0; i < numvnodelocks; i++)
		mtx_lock(&vnodelocks[i]);
}

static void
cache_unlock_all_vnodes(void)
{
	u_int i;

	for (i = 0; i < numvnodelocks; i++)
		mtx_unlock(&vnodelocks[i]);
}

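/*
 * Try to take both vnode locks in the proper order, undoing any partial
 * progress and returning EAGAIN on failure.
 */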
static int
cache_trylock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
{

	cache_sort(&vlp1, &vlp2);
	MPASS(vlp2 != NULL);

	if (vlp1 != NULL) {
		if (!mtx_trylock(vlp1))
			return (EAGAIN);
	}
	if (!mtx_trylock(vlp2)) {
		if (vlp1 != NULL)
			mtx_unlock(vlp1);
		return (EAGAIN);
	}

	return (0);
}

static void
cache_unlock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
{

	MPASS(vlp1 != NULL || vlp2 != NULL);

	if (vlp1 != NULL)
		mtx_unlock(vlp1);
	if (vlp2 != NULL)
		mtx_unlock(vlp2);
}

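/*
 * Export a snapshot of the cache effectiveness statistics, folding the
 * per-CPU counters into the legacy nchstats layout.  Readable with e.g.
 * "sysctl vfs.cache.nchstats".
 */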
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct nchstats snap;

	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, sizeof(snap)));

	snap = nchstats;
	snap.ncs_goodhits = counter_u64_fetch(numposhits);
	snap.ncs_neghits = counter_u64_fetch(numneghits);
	snap.ncs_badhits = counter_u64_fetch(numposzaps) +
	    counter_u64_fetch(numnegzaps);
	snap.ncs_miss = counter_u64_fetch(nummisszap) +
	    counter_u64_fetch(nummiss);

	return (SYSCTL_OUT(req, &snap, sizeof(snap)));
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE | CTLFLAG_RD |
    CTLFLAG_MPSAFE, 0, 0, sysctl_nchstats, "LU",
    "VFS cache effectiveness statistics");

#ifdef DIAGNOSTIC
/*
 * Grab an atomic snapshot of the name cache hash chain lengths
 */
static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL,
    "hash table stats");

static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int i, error, n_nchash, *cntbuf;

retry:
	n_nchash = nchash + 1;	/* nchash is max index, not count */
	if (req->oldptr == NULL)
		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
	cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK);
	cache_lock_all_buckets();
	if (n_nchash != nchash + 1) {
		cache_unlock_all_buckets();
		free(cntbuf, M_TEMP);
		goto retry;
	}
	/* Scan hash tables counting entries */
	for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++)
		LIST_FOREACH(ncp, ncpp, nc_hash)
			cntbuf[i]++;
	cache_unlock_all_buckets();
	for (error = 0, i = 0; i < n_nchash; i++)
		if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0)
			break;
	free(cntbuf, M_TEMP);
	return (error);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
    "nchash chain lengths");

static int
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count, maxlength, used, pct;

	if (!req->oldptr)
		return SYSCTL_OUT(req, 0, 4 * sizeof(int));

	cache_lock_all_buckets();
	n_nchash = nchash + 1;	/* nchash is max index, not count */
	used = 0;
	maxlength = 0;

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		if (count)
			used++;
		if (maxlength < count)
			maxlength = count;
	}
	n_nchash = nchash + 1;
	cache_unlock_all_buckets();
	pct = (used * 100) / (n_nchash / 100);
	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &used, sizeof(used));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &pct, sizeof(pct));
	if (error)
		return (error);
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
    "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)");
#endif

/*
 * Negative entries management
 */
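/* Requeue the entry at the tail of the LRU list on a hit. */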
static void
cache_negative_hit(struct namecache *ncp)
{

	MPASS(ncp->nc_vp == NULL);
	mtx_lock(&ncneg_mtx);
	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	mtx_unlock(&ncneg_mtx);
}

static void
cache_negative_insert(struct namecache *ncp)
{

	MPASS(ncp->nc_vp == NULL);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);
	mtx_lock(&ncneg_mtx);
	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	numneg++;
	mtx_unlock(&ncneg_mtx);
}

static void
cache_negative_remove(struct namecache *ncp, bool neg_locked)
{

	MPASS(ncp->nc_vp == NULL);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);
	if (!neg_locked)
		mtx_lock(&ncneg_mtx);
	else
		mtx_assert(&ncneg_mtx, MA_OWNED);
	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
	numneg--;
	if (!neg_locked)
		mtx_unlock(&ncneg_mtx);
}

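/*
 * Evict the least recently used negative entry.  The LRU head is sampled
 * under ncneg_mtx, after which the relevant vnode and bucket locks are
 * taken and the state revalidated, as the entry may have been reused or
 * freed in the meantime.
 */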
static void
cache_negative_zap_one(void)
{
	struct namecache *ncp, *ncp2;
	struct mtx *dvlp;
	struct rwlock *blp;

	if (!mtx_trylock(&ncneg_shrink_lock))
		return;

	mtx_lock(&ncneg_mtx);
	ncp = TAILQ_FIRST(&ncneg);
	if (ncp == NULL) {
		mtx_unlock(&ncneg_mtx);
		goto out;
	}
	MPASS(ncp->nc_vp == NULL);
	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	blp = NCP2BUCKETLOCK(ncp);
	mtx_unlock(&ncneg_mtx);
	mtx_lock(dvlp);
	rw_wlock(blp);
	mtx_lock(&ncneg_mtx);
	ncp2 = TAILQ_FIRST(&ncneg);
	if (ncp != ncp2 || dvlp != VP2VNODELOCK(ncp2->nc_dvp) ||
	    blp != NCP2BUCKETLOCK(ncp2) || ncp2->nc_vp != NULL) {
		ncp = NULL;
		goto out_unlock_all;
	}
	cache_zap_locked(ncp, true);
out_unlock_all:
	mtx_unlock(&ncneg_mtx);
	rw_wunlock(blp);
	mtx_unlock(dvlp);
out:
	mtx_unlock(&ncneg_shrink_lock);
	cache_free(ncp);
}

/*
 * cache_zap_locked():
 *
 *   Removes a namecache entry from the cache, whether it contains an actual
 *   pointer to a vnode or is just a negative cache entry.
 */
static void
cache_zap_locked(struct namecache *ncp, bool neg_locked)
{

	cache_assert_vnode_locked(ncp->nc_vp);
	cache_assert_vnode_locked(ncp->nc_dvp);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);

	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp);
	if (ncp->nc_vp != NULL) {
		SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp,
		    nc_get_name(ncp), ncp->nc_vp);
	} else {
		SDT_PROBE2(vfs, namecache, zap_negative, done, ncp->nc_dvp,
		    nc_get_name(ncp));
	}
	LIST_REMOVE(ncp, nc_hash);
	if (ncp->nc_flag & NCF_ISDOTDOT) {
		if (ncp == ncp->nc_dvp->v_cache_dd)
			ncp->nc_dvp->v_cache_dd = NULL;
	} else {
		LIST_REMOVE(ncp, nc_src);
		if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
			ncp->nc_flag |= NCF_DVDROP;
			atomic_subtract_rel_long(&numcachehv, 1);
		}
	}
	if (ncp->nc_vp) {
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
		if (ncp == ncp->nc_vp->v_cache_dd)
			ncp->nc_vp->v_cache_dd = NULL;
	} else {
		cache_negative_remove(ncp, neg_locked);
	}
	atomic_subtract_rel_long(&numcache, 1);
}

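/*
 * Zap a negative entry whose directory vnode lock is already held; only the
 * bucket lock needs to be taken on top of it.
 */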
static void
cache_zap_negative_locked_vnode_kl(struct namecache *ncp, struct vnode *vp)
{
	struct rwlock *blp;

	MPASS(ncp->nc_dvp == vp);
	MPASS(ncp->nc_vp == NULL);
	cache_assert_vnode_locked(vp);

	blp = NCP2BUCKETLOCK(ncp);
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
}

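/*
 * Zap an entry while holding the lock of one of its vnodes, reusing the lock
 * pointed to by *vlpp if it happens to match.  Returns false if the required
 * locks could not be taken in order; in that case everything is relocked
 * with *vlpp set to the extra lock now held, and the caller has to
 * revalidate the state and retry.
 */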
static bool
cache_zap_locked_vnode_kl2(struct namecache *ncp, struct vnode *vp,
    struct mtx **vlpp)
{
	struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
	struct rwlock *blp;

	MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
	cache_assert_vnode_locked(vp);

	if (ncp->nc_vp == NULL) {
		if (*vlpp != NULL) {
			mtx_unlock(*vlpp);
			*vlpp = NULL;
		}
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		return (true);
	}

	pvlp = VP2VNODELOCK(vp);
	blp = NCP2BUCKETLOCK(ncp);
	vlp1 = VP2VNODELOCK(ncp->nc_dvp);
	vlp2 = VP2VNODELOCK(ncp->nc_vp);

	if (*vlpp == vlp1 || *vlpp == vlp2) {
		to_unlock = *vlpp;
		*vlpp = NULL;
	} else {
		if (*vlpp != NULL) {
			mtx_unlock(*vlpp);
			*vlpp = NULL;
		}
		cache_sort(&vlp1, &vlp2);
		if (vlp1 == pvlp) {
			mtx_lock(vlp2);
			to_unlock = vlp2;
		} else {
			if (!mtx_trylock(vlp1))
				goto out_relock;
			to_unlock = vlp1;
		}
	}
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
	if (to_unlock != NULL)
		mtx_unlock(to_unlock);
	return (true);

out_relock:
	mtx_unlock(vlp2);
	mtx_lock(vlp1);
	mtx_lock(vlp2);
	MPASS(*vlpp == NULL);
	*vlpp = vlp1;
	return (false);
}

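/*
 * Zap an entry while holding the lock of one of its vnodes.  The remaining
 * locks are trylocked and EAGAIN is returned on failure.  The vnode lock
 * held on entry is dropped before returning in all cases.
 */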
static int
cache_zap_locked_vnode(struct namecache *ncp, struct vnode *vp)
{
	struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
	struct rwlock *blp;
	int error = 0;

	MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
	cache_assert_vnode_locked(vp);

	pvlp = VP2VNODELOCK(vp);
	if (ncp->nc_vp == NULL) {
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		goto out;
	}

	blp = NCP2BUCKETLOCK(ncp);
	vlp1 = VP2VNODELOCK(ncp->nc_dvp);
	vlp2 = VP2VNODELOCK(ncp->nc_vp);
	cache_sort(&vlp1, &vlp2);
	if (vlp1 == pvlp) {
		mtx_lock(vlp2);
		to_unlock = vlp2;
	} else {
		if (!mtx_trylock(vlp1)) {
			error = EAGAIN;
			goto out;
		}
		to_unlock = vlp1;
	}
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
	mtx_unlock(to_unlock);
out:
	mtx_unlock(pvlp);
	return (error);
}

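/*
 * Zap an entry found with the bucket only read-locked.  The vnode locks are
 * merely trylocked; on failure the bucket lock is dropped and EAGAIN is
 * returned so that the caller can restart.
 */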
static int
cache_zap_rlocked_bucket(struct namecache *ncp, struct rwlock *blp)
{
	struct mtx *dvlp, *vlp;

	cache_assert_bucket_locked(ncp, RA_RLOCKED);

	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	vlp = VP2VNODELOCK(ncp->nc_vp);
	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
		rw_runlock(blp);
		rw_wlock(blp);
		cache_zap_locked(ncp, false);
		rw_wunlock(blp);
		cache_unlock_vnodes(dvlp, vlp);
		return (0);
	}

	rw_runlock(blp);
	return (EAGAIN);
}

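/*
 * Zap an entry with the bucket write-locked, reusing vnode locks the caller
 * may already hold in *vlpp1 and *vlpp2.  If trylocking fails, the bucket
 * lock is dropped while the vnode locks are taken and then reacquired, and
 * EAGAIN is returned so the caller can revalidate and retry.
 */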
static int
cache_zap_wlocked_bucket_kl(struct namecache *ncp, struct rwlock *blp,
    struct mtx **vlpp1, struct mtx **vlpp2)
{
	struct mtx *dvlp, *vlp;

	cache_assert_bucket_locked(ncp, RA_WLOCKED);

	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	vlp = VP2VNODELOCK(ncp->nc_vp);
	cache_sort(&dvlp, &vlp);

	if (*vlpp1 == dvlp && *vlpp2 == vlp) {
		cache_zap_locked(ncp, false);
		cache_unlock_vnodes(dvlp, vlp);
		*vlpp1 = NULL;
		*vlpp2 = NULL;
		return (0);
	}

	if (*vlpp1 != NULL)
		mtx_unlock(*vlpp1);
	if (*vlpp2 != NULL)
		mtx_unlock(*vlpp2);
	*vlpp1 = NULL;
	*vlpp2 = NULL;

	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
		cache_zap_locked(ncp, false);
		cache_unlock_vnodes(dvlp, vlp);
		return (0);
	}

	rw_wunlock(blp);
	*vlpp1 = dvlp;
	*vlpp2 = vlp;
	if (*vlpp1 != NULL)
		mtx_lock(*vlpp1);
	mtx_lock(*vlpp2);
	rw_wlock(blp);
	return (EAGAIN);
}

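/*
 * Drop whichever lock the lookup path took: the bucket lock for hashed
 * lookups, the vnode lock for ".." lookups.
 */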
static void
cache_lookup_unlock(struct rwlock *blp, struct mtx *vlp)
{

	if (blp != NULL) {
		rw_runlock(blp);
		mtx_assert(vlp, MA_NOTOWNED);
	} else {
		mtx_unlock(vlp);
	}
}

/*
 * Lookup an entry in the cache
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp pointing to the name of the entry being sought. If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned. If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned. If the lookup
 * fails, a status of zero is returned.  If the directory vnode is
 * recycled out from under us due to a forced unmount, a status of
 * ENOENT is returned.
 *
 * vpp is locked and ref'd on return.  If we're looking up DOTDOT, dvp is
 * unlocked.  If we're looking up . an extra ref is taken, but the lock is
 * not recursively acquired.
 */

int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
    struct timespec *tsp, int *ticksp)
{
	struct namecache *ncp;
	struct rwlock *blp;
	struct mtx *dvlp, *dvlp2;
	uint32_t hash;
	int error, ltype;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}
retry:
	blp = NULL;
	dvlp = VP2VNODELOCK(dvp);
	error = 0;
	counter_u64_add(numcalls, 1);

	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			*vpp = dvp;
			CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
			    dvp, cnp->cn_nameptr);
			counter_u64_add(dothits, 1);
			SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp);
			if (tsp != NULL)
				timespecclear(tsp);
			if (ticksp != NULL)
				*ticksp = ticks;
			VREF(*vpp);
			/*
			 * When we lookup "." we still can be asked to lock it
			 * differently...
			 */
			ltype = cnp->cn_lkflags & LK_TYPE_MASK;
			if (ltype != VOP_ISLOCKED(*vpp)) {
				if (ltype == LK_EXCLUSIVE) {
					vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
					if ((*vpp)->v_iflag & VI_DOOMED) {
						/* forced unmount */
						vrele(*vpp);
						*vpp = NULL;
						return (ENOENT);
					}
				} else
					vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
			}
			return (-1);
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			counter_u64_add(dotdothits, 1);
			dvlp2 = NULL;
			mtx_lock(dvlp);
retry_dotdot:
			ncp = dvp->v_cache_dd;
			if (ncp == NULL) {
				SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
				    "..", NULL);
				mtx_unlock(dvlp);
				return (0);
			}
			if ((cnp->cn_flags & MAKEENTRY) == 0) {
				if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
					if (ncp->nc_dvp != dvp)
						panic("dvp %p v_cache_dd %p\n", dvp, ncp);
					if (!cache_zap_locked_vnode_kl2(ncp,
					    dvp, &dvlp2))
						goto retry_dotdot;
					MPASS(dvp->v_cache_dd == NULL);
					mtx_unlock(dvlp);
					if (dvlp2 != NULL)
						mtx_unlock(dvlp2);
					cache_free(ncp);
				} else {
					dvp->v_cache_dd = NULL;
					mtx_unlock(dvlp);
					if (dvlp2 != NULL)
						mtx_unlock(dvlp2);
				}
				return (0);
			}
			if ((ncp->nc_flag & NCF_ISDOTDOT) != 0)
				*vpp = ncp->nc_vp;
			else
				*vpp = ncp->nc_dvp;
			/* Return failure if negative entry was found. */
			if (*vpp == NULL)
				goto negative_success;
			CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
			    dvp, cnp->cn_nameptr, *vpp);
			SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..",
			    *vpp);
			cache_out_ts(ncp, tsp, ticksp);
			if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) ==
			    NCF_DTS && tsp != NULL)
				*tsp = ((struct namecache_ts *)ncp)->
				    nc_dotdottime;
			goto success;
		}
	}

	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
	blp = HASH2BUCKETLOCK(hash);
	rw_rlock(blp);

	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		counter_u64_add(numchecks, 1);
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(nc_get_name(ncp), cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == NULL) {
		SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
		    NULL);
		if ((cnp->cn_flags & MAKEENTRY) == 0) {
			counter_u64_add(nummisszap, 1);
		} else {
			counter_u64_add(nummiss, 1);
		}
		goto unlock;
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		counter_u64_add(numposzaps, 1);
		goto zap_and_exit;
	}

	/* We found a "positive" match, return the vnode */
	if (ncp->nc_vp) {
		counter_u64_add(numposhits, 1);
		*vpp = ncp->nc_vp;
		CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
		    dvp, cnp->cn_nameptr, *vpp, ncp);
		SDT_PROBE3(vfs, namecache, lookup, hit, dvp, nc_get_name(ncp),
		    *vpp);
		cache_out_ts(ncp, tsp, ticksp);
		goto success;
	}

negative_success:
	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		counter_u64_add(numnegzaps, 1);
		goto zap_and_exit;
	}

	counter_u64_add(numneghits, 1);
	cache_negative_hit(ncp);
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp,
	    nc_get_name(ncp));
	cache_out_ts(ncp, tsp, ticksp);
	cache_lookup_unlock(blp, dvlp);
	return (ENOENT);

success:
	/*
	 * On success we return a locked and ref'd vnode as per the lookup
	 * protocol.
	 */
	MPASS(dvp != *vpp);
	ltype = 0;	/* silence gcc warning */
	if (cnp->cn_flags & ISDOTDOT) {
		ltype = VOP_ISLOCKED(dvp);
		VOP_UNLOCK(dvp, 0);
	}
	vhold(*vpp);
	cache_lookup_unlock(blp, dvlp);
	error = vget(*vpp, cnp->cn_lkflags | LK_VNHELD, cnp->cn_thread);
	if (cnp->cn_flags & ISDOTDOT) {
		vn_lock(dvp, ltype | LK_RETRY);
		if (dvp->v_iflag & VI_DOOMED) {
			if (error == 0)
				vput(*vpp);
			*vpp = NULL;
			return (ENOENT);
		}
	}
	if (error) {
		*vpp = NULL;
		goto retry;
	}
	if ((cnp->cn_flags & ISLASTCN) &&
	    (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
		ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
	}
	return (-1);

unlock:
	cache_lookup_unlock(blp, dvlp);
	return (0);

zap_and_exit:
	if (blp != NULL)
		error = cache_zap_rlocked_bucket(ncp, blp);
	else
		error = cache_zap_locked_vnode(ncp, dvp);
	if (error != 0) {
		zap_and_exit_bucket_fail++;
		cache_maybe_yield();
		goto retry;
	}
	cache_free(ncp);
	return (0);
}

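/*
 * State of the locks taken for an insertion: up to three vnode locks and two
 * bucket locks, see the comment above cache_enter_lock.
 */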
struct celockstate {
	struct mtx *vlp[3];
	struct rwlock *blp[2];
};
CTASSERT((nitems(((struct celockstate *)0)->vlp) == 3));
CTASSERT((nitems(((struct celockstate *)0)->blp) == 2));

static inline void
cache_celockstate_init(struct celockstate *cel)
{

	bzero(cel, sizeof(*cel));
}

static void
cache_lock_vnodes_cel(struct celockstate *cel, struct vnode *vp,
    struct vnode *dvp)
{
	struct mtx *vlp1, *vlp2;

	MPASS(cel->vlp[0] == NULL);
	MPASS(cel->vlp[1] == NULL);
	MPASS(cel->vlp[2] == NULL);

	MPASS(vp != NULL || dvp != NULL);

	vlp1 = VP2VNODELOCK(vp);
	vlp2 = VP2VNODELOCK(dvp);
	cache_sort(&vlp1, &vlp2);

	if (vlp1 != NULL) {
		mtx_lock(vlp1);
		cel->vlp[0] = vlp1;
	}
	mtx_lock(vlp2);
	cel->vlp[1] = vlp2;
}

static void
cache_unlock_vnodes_cel(struct celockstate *cel)
{

	MPASS(cel->vlp[0] != NULL || cel->vlp[1] != NULL);

	if (cel->vlp[0] != NULL)
		mtx_unlock(cel->vlp[0]);
	if (cel->vlp[1] != NULL)
		mtx_unlock(cel->vlp[1]);
	if (cel->vlp[2] != NULL)
		mtx_unlock(cel->vlp[2]);
}

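/*
 * Lock a third vnode lock while two are already held, preserving the "lower
 * address first" order.  Returns false if all the locks had to be dropped
 * and reacquired, in which case the caller must revalidate the state.
 */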
static bool
cache_lock_vnodes_cel_3(struct celockstate *cel, struct vnode *vp)
{
	struct mtx *vlp;
	bool ret;

	cache_assert_vlp_locked(cel->vlp[0]);
	cache_assert_vlp_locked(cel->vlp[1]);
	MPASS(cel->vlp[2] == NULL);

	vlp = VP2VNODELOCK(vp);
	if (vlp == NULL)
		return (true);

	ret = true;
	if (vlp >= cel->vlp[1]) {
		mtx_lock(vlp);
	} else {
		if (mtx_trylock(vlp))
			goto out;
		cache_lock_vnodes_cel_3_failures++;
		cache_unlock_vnodes_cel(cel);
		if (vlp < cel->vlp[0]) {
			mtx_lock(vlp);
			mtx_lock(cel->vlp[0]);
			mtx_lock(cel->vlp[1]);
		} else {
			if (cel->vlp[0] != NULL)
				mtx_lock(cel->vlp[0]);
			mtx_lock(vlp);
			mtx_lock(cel->vlp[1]);
		}
		ret = false;
	}
out:
	cel->vlp[2] = vlp;
	return (ret);
}

static void
cache_lock_buckets_cel(struct celockstate *cel, struct rwlock *blp1,
    struct rwlock *blp2)
{

	MPASS(cel->blp[0] == NULL);
	MPASS(cel->blp[1] == NULL);

	cache_sort(&blp1, &blp2);

	if (blp1 != NULL) {
		rw_wlock(blp1);
		cel->blp[0] = blp1;
	}
	rw_wlock(blp2);
	cel->blp[1] = blp2;
}

static void
cache_unlock_buckets_cel(struct celockstate *cel)
{

	if (cel->blp[0] != NULL)
		rw_wunlock(cel->blp[0]);
	rw_wunlock(cel->blp[1]);
}

/*
 * Lock part of the cache affected by the insertion.
 *
 * This means vnodelocks for dvp, vp and the relevant bucketlock.
 * However, insertion can result in removal of an old entry. In this
 * case we have an additional vnode and bucketlock pair to lock. If the
 * entry is negative, ncneg_mtx ends up being taken instead of a vnode lock.
 *
 * That is, in the worst case we have to lock 3 vnodes and 2 bucketlocks, while
 * preserving the locking order (smaller address first).
 */
static void
cache_enter_lock(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
    uint32_t hash)
{
	struct namecache *ncp;
	struct rwlock *blps[2];

	blps[0] = HASH2BUCKETLOCK(hash);
	for (;;) {
		blps[1] = NULL;
		cache_lock_vnodes_cel(cel, dvp, vp);
		if (vp == NULL || vp->v_type != VDIR)
			break;
		ncp = vp->v_cache_dd;
		if (ncp == NULL)
			break;
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
		MPASS(ncp->nc_dvp == vp);
		blps[1] = NCP2BUCKETLOCK(ncp);
		if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
			break;
		/*
		 * All vnodes got re-locked. Re-validate the state and if
		 * nothing changed we are done. Otherwise restart.
		 */
		if (ncp == vp->v_cache_dd &&
		    (ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
		    blps[1] == NCP2BUCKETLOCK(ncp) &&
		    VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
			break;
		cache_unlock_vnodes_cel(cel);
		cel->vlp[0] = NULL;
		cel->vlp[1] = NULL;
		cel->vlp[2] = NULL;
	}
	cache_lock_buckets_cel(cel, blps[0], blps[1]);
}

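/*
 * Same as cache_enter_lock, but for the case of inserting the ".." entry for
 * dvp, where the old entry hangs off of dvp itself.
 */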
static void
cache_enter_lock_dd(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
    uint32_t hash)
{
	struct namecache *ncp;
	struct rwlock *blps[2];

	blps[0] = HASH2BUCKETLOCK(hash);
	for (;;) {
		blps[1] = NULL;
		cache_lock_vnodes_cel(cel, dvp, vp);
		ncp = dvp->v_cache_dd;
		if (ncp == NULL)
			break;
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
		MPASS(ncp->nc_dvp == dvp);
		blps[1] = NCP2BUCKETLOCK(ncp);
		if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
			break;
		if (ncp == dvp->v_cache_dd &&
		    (ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
		    blps[1] == NCP2BUCKETLOCK(ncp) &&
		    VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
			break;
		cache_unlock_vnodes_cel(cel);
		cel->vlp[0] = NULL;
		cel->vlp[1] = NULL;
		cel->vlp[2] = NULL;
	}
	cache_lock_buckets_cel(cel, blps[0], blps[1]);
}

static void
cache_enter_unlock(struct celockstate *cel)
{

	cache_unlock_buckets_cel(cel);
	cache_unlock_vnodes_cel(cel);
}

/*
 * Add an entry to the cache.
 */
void
cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
    struct timespec *tsp, struct timespec *dtsp)
{
	struct celockstate cel;
	struct namecache *ncp, *n2, *ndd;
	struct namecache_ts *n3;
	struct nchashhead *ncpp;
	uint32_t hash;
	int flag;
	int len;

	CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
	VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
	    ("cache_enter: Adding a doomed vnode"));
	VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
	    ("cache_enter: Doomed vnode used as src"));

	if (!doingcache)
		return;

	/*
	 * Avoid blowout in namecache entries.
	 */
	if (numcache >= desiredvnodes * ncsizefactor)
		return;

	cache_celockstate_init(&cel);
	ndd = NULL;
	flag = 0;
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1)
			return;
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			len = cnp->cn_namelen;
			hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
			cache_enter_lock_dd(&cel, dvp, vp, hash);
			/*
			 * If dotdot entry already exists, just retarget it
			 * to new parent vnode, otherwise continue with new
			 * namecache entry allocation.
			 */
			if ((ncp = dvp->v_cache_dd) != NULL &&
			    ncp->nc_flag & NCF_ISDOTDOT) {
				KASSERT(ncp->nc_dvp == dvp,
				    ("wrong isdotdot parent"));
				if (ncp->nc_vp != NULL) {
					TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst,
					    ncp, nc_dst);
				} else {
					cache_negative_remove(ncp, false);
				}
				if (vp != NULL) {
					TAILQ_INSERT_HEAD(&vp->v_cache_dst,
					    ncp, nc_dst);
				} else {
					cache_negative_insert(ncp);
				}
				ncp->nc_vp = vp;
				cache_enter_unlock(&cel);
				return;
			}
			dvp->v_cache_dd = NULL;
			cache_enter_unlock(&cel);
			cache_celockstate_init(&cel);
			SDT_PROBE3(vfs, namecache, enter, done, dvp, "..", vp);
			flag = NCF_ISDOTDOT;
		}
	}

	/*
	 * Calculate the hash key and setup as much of the new
	 * namecache entry as possible before acquiring the lock.
	 */
	ncp = cache_alloc(cnp->cn_namelen, tsp != NULL);
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	ncp->nc_flag = flag;
	if (tsp != NULL) {
		n3 = (struct namecache_ts *)ncp;
		n3->nc_time = *tsp;
		n3->nc_ticks = ticks;
		n3->nc_flag |= NCF_TS;
		if (dtsp != NULL) {
			n3->nc_dotdottime = *dtsp;
			n3->nc_flag |= NCF_DTS;
		}
	}
	len = ncp->nc_nlen = cnp->cn_namelen;
	hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
	strlcpy(nc_get_name(ncp), cnp->cn_nameptr, len + 1);
	cache_enter_lock(&cel, dvp, vp, hash);

	/*
	 * See if this vnode or negative entry is already in the cache
	 * with this name.  This can happen with concurrent lookups of
	 * the same path name.
	 */
	ncpp = NCHHASH(hash);
	LIST_FOREACH(n2, ncpp, nc_hash) {
		if (n2->nc_dvp == dvp &&
		    n2->nc_nlen == cnp->cn_namelen &&
		    !bcmp(nc_get_name(n2), cnp->cn_nameptr, n2->nc_nlen)) {
			if (tsp != NULL) {
				KASSERT((n2->nc_flag & NCF_TS) != 0,
				    ("no NCF_TS"));
				n3 = (struct namecache_ts *)n2;
				n3->nc_time =
				    ((struct namecache_ts *)ncp)->nc_time;
				n3->nc_ticks =
				    ((struct namecache_ts *)ncp)->nc_ticks;
				if (dtsp != NULL) {
					n3->nc_dotdottime =
					    ((struct namecache_ts *)ncp)->
					    nc_dotdottime;
					n3->nc_flag |= NCF_DTS;
				}
			}
			goto out_unlock_free;
		}
	}

	if (flag == NCF_ISDOTDOT) {
		/*
		 * See if we are trying to add .. entry, but some other lookup
		 * has populated v_cache_dd pointer already.
		 */
		if (dvp->v_cache_dd != NULL)
			goto out_unlock_free;
		KASSERT(vp == NULL || vp->v_type == VDIR,
		    ("wrong vnode type %p", vp));
		dvp->v_cache_dd = ncp;
	}

	atomic_add_rel_long(&numcache, 1);
	if (vp != NULL) {
		if (vp->v_type == VDIR) {
			if (flag != NCF_ISDOTDOT) {
				/*
				 * For this case, the cache entry maps both the
				 * directory name in it and the name ".." for the
				 * directory's parent.
				 */
				if ((ndd = vp->v_cache_dd) != NULL) {
					if ((ndd->nc_flag & NCF_ISDOTDOT) != 0)
						cache_zap_locked(ndd, false);
					else
						ndd = NULL;
				}
				vp->v_cache_dd = ncp;
			}
		} else {
			vp->v_cache_dd = NULL;
		}
	}

	if (flag != NCF_ISDOTDOT) {
		if (LIST_EMPTY(&dvp->v_cache_src)) {
			vhold(dvp);
			atomic_add_rel_long(&numcachehv, 1);
		}
		LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	}

	/*
	 * Insert the new namecache entry into the appropriate chain
	 * within the cache entries table.
	 */
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);

	/*
	 * If the entry is "negative", we place it into the
	 * "negative" cache queue, otherwise, we place it into the
	 * destination vnode's cache entries queue.
	 */
	if (vp != NULL) {
		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
		SDT_PROBE3(vfs, namecache, enter, done, dvp, nc_get_name(ncp),
		    vp);
	} else {
		if (cnp->cn_flags & ISWHITEOUT)
			ncp->nc_flag |= NCF_WHITE;
		cache_negative_insert(ncp);
		SDT_PROBE2(vfs, namecache, enter_negative, done, dvp,
		    nc_get_name(ncp));
	}
	cache_enter_unlock(&cel);
	if (numneg * ncnegfactor > numcache)
		cache_negative_zap_one();
	cache_free(ndd);
	return;
out_unlock_free:
	cache_enter_unlock(&cel);
	cache_free(ncp);
	return;
}

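/* Return the smallest power of 2 greater than val. */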
static u_int
cache_roundup_2(u_int val)
{
	u_int res;

	for (res = 1; res <= val; res <<= 1)
		continue;

	return (res);
}

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
static void
nchinit(void *dummy __unused)
{
	u_int i;

	TAILQ_INIT(&ncneg);

	cache_zone_small = uma_zcreate("S VFS Cache",
	    sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cache_zone_small_ts = uma_zcreate("STS VFS Cache",
	    sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cache_zone_large = uma_zcreate("L VFS Cache",
	    sizeof(struct namecache) + NAME_MAX + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cache_zone_large_ts = uma_zcreate("LTS VFS Cache",
	    sizeof(struct namecache_ts) + NAME_MAX + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
	numbucketlocks = cache_roundup_2(mp_ncpus * 64);
	bucketlocks = malloc(sizeof(*bucketlocks) * numbucketlocks, M_VFSCACHE,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < numbucketlocks; i++)
		rw_init_flags(&bucketlocks[i], "ncbuc", RW_DUPOK | RW_RECURSE);
	numvnodelocks = cache_roundup_2(mp_ncpus * 64);
	vnodelocks = malloc(sizeof(*vnodelocks) * numvnodelocks, M_VFSCACHE,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < numvnodelocks; i++)
		mtx_init(&vnodelocks[i], "ncvn", NULL, MTX_DUPOK | MTX_RECURSE);
	ncpurgeminvnodes = numbucketlocks;

	numcalls = counter_u64_alloc(M_WAITOK);
	dothits = counter_u64_alloc(M_WAITOK);
	dotdothits = counter_u64_alloc(M_WAITOK);
	numchecks = counter_u64_alloc(M_WAITOK);
	nummiss = counter_u64_alloc(M_WAITOK);
	nummisszap = counter_u64_alloc(M_WAITOK);
	numposzaps = counter_u64_alloc(M_WAITOK);
	numposhits = counter_u64_alloc(M_WAITOK);
	numnegzaps = counter_u64_alloc(M_WAITOK);
	numneghits = counter_u64_alloc(M_WAITOK);
	numfullpathcalls = counter_u64_alloc(M_WAITOK);
	numfullpathfail1 = counter_u64_alloc(M_WAITOK);
	numfullpathfail2 = counter_u64_alloc(M_WAITOK);
	numfullpathfail4 = counter_u64_alloc(M_WAITOK);
	numfullpathfound = counter_u64_alloc(M_WAITOK);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);

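/*
 * Resize the hash table to match the new vnode limit, e.g. after
 * kern.maxvnodes has been adjusted.
 */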
void
cache_changesize(int newmaxvnodes)
{
	struct nchashhead *new_nchashtbl, *old_nchashtbl;
	u_long new_nchash, old_nchash;
	struct namecache *ncp;
	uint32_t hash;
	int i;

	new_nchashtbl = hashinit(newmaxvnodes * 2, M_VFSCACHE, &new_nchash);
	/* If same hash table size, nothing to do */
	if (nchash == new_nchash) {
		free(new_nchashtbl, M_VFSCACHE);
		return;
	}
	/*
	 * Move everything from the old hash table to the new table.
	 * None of the namecache entries in the table can be removed
	 * because to do so, they have to be removed from the hash table.
	 */
	cache_lock_all_vnodes();
	cache_lock_all_buckets();
	old_nchashtbl = nchashtbl;
	old_nchash = nchash;
	nchashtbl = new_nchashtbl;
	nchash = new_nchash;
	for (i = 0; i <= old_nchash; i++) {
		while ((ncp = LIST_FIRST(&old_nchashtbl[i])) != NULL) {
			hash = cache_get_hash(nc_get_name(ncp), ncp->nc_nlen,
			    ncp->nc_dvp);
			LIST_REMOVE(ncp, nc_hash);
			LIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash);
		}
	}
	cache_unlock_all_buckets();
	cache_unlock_all_vnodes();
	free(old_nchashtbl, M_VFSCACHE);
}

/*
 * Invalidate all entries to a particular vnode.
 */
void
cache_purge(struct vnode *vp)
{
	TAILQ_HEAD(, namecache) ncps;
	struct namecache *ncp, *nnp;
	struct mtx *vlp, *vlp2;

	CTR1(KTR_VFS, "cache_purge(%p)", vp);
	SDT_PROBE1(vfs, namecache, purge, done, vp);
	if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
	    vp->v_cache_dd == NULL)
		return;
	TAILQ_INIT(&ncps);
	vlp = VP2VNODELOCK(vp);
	vlp2 = NULL;
	mtx_lock(vlp);
retry:
	while (!LIST_EMPTY(&vp->v_cache_src)) {
		ncp = LIST_FIRST(&vp->v_cache_src);
		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
			goto retry;
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	while (!TAILQ_EMPTY(&vp->v_cache_dst)) {
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
			goto retry;
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	ncp = vp->v_cache_dd;
	if (ncp != NULL) {
		KASSERT(ncp->nc_flag & NCF_ISDOTDOT,
		   ("lost dotdot link"));
		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
			goto retry;
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	KASSERT(vp->v_cache_dd == NULL, ("incomplete purge"));
	mtx_unlock(vlp);
	if (vlp2 != NULL)
		mtx_unlock(vlp2);
	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
		cache_free(ncp);
	}
}

/*
 * Invalidate all negative entries for a particular directory vnode.
 */
void
cache_purge_negative(struct vnode *vp)
{
	TAILQ_HEAD(, namecache) ncps;
	struct namecache *ncp, *nnp;
	struct mtx *vlp;

	CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
	SDT_PROBE1(vfs, namecache, purge_negative, done, vp);
	TAILQ_INIT(&ncps);
	vlp = VP2VNODELOCK(vp);
	mtx_lock(vlp);
	LIST_FOREACH_SAFE(ncp, &vp->v_cache_src, nc_src, nnp) {
		if (ncp->nc_vp != NULL)
			continue;
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	mtx_unlock(vlp);
	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
		cache_free(ncp);
	}
}

/*
 * Flush all entries referencing a particular filesystem.
 */
void
cache_purgevfs(struct mount *mp, bool force)
{
	TAILQ_HEAD(, namecache) ncps;
	struct mtx *vlp1, *vlp2;
	struct rwlock *blp;
	struct nchashhead *bucket;
	struct namecache *ncp, *nnp;
	u_long i, j, n_nchash;
	int error;

	/* Scan hash tables for applicable entries */
	SDT_PROBE1(vfs, namecache, purgevfs, done, mp);
	if (!force && mp->mnt_nvnodelistsize <= ncpurgeminvnodes)
		return;
	TAILQ_INIT(&ncps);
	n_nchash = nchash + 1;
	vlp1 = vlp2 = NULL;
	for (i = 0; i < numbucketlocks; i++) {
		blp = (struct rwlock *)&bucketlocks[i];
		rw_wlock(blp);
		for (j = i; j < n_nchash; j += numbucketlocks) {
retry:
			bucket = &nchashtbl[j];
			LIST_FOREACH_SAFE(ncp, bucket, nc_hash, nnp) {
				cache_assert_bucket_locked(ncp, RA_WLOCKED);
				if (ncp->nc_dvp->v_mount != mp)
					continue;
				error = cache_zap_wlocked_bucket_kl(ncp, blp,
				    &vlp1, &vlp2);
				if (error != 0)
					goto retry;
				TAILQ_INSERT_HEAD(&ncps, ncp, nc_dst);
			}
		}
		rw_wunlock(blp);
		if (vlp1 == NULL && vlp2 == NULL)
			cache_maybe_yield();
	}
	if (vlp1 != NULL)
		mtx_unlock(vlp1);
	if (vlp2 != NULL)
		mtx_unlock(vlp2);

	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
		cache_free(ncp);
	}
}

/*
 * Perform canonical checks and cache lookup and pass on to filesystem
 * through the vop_cachedlookup only if needed.
 */

int
vfs_cache_lookup(struct vop_lookup_args *ap)
{
	struct vnode *dvp;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct thread *td = cnp->cn_thread;

	*vpp = NULL;
	dvp = ap->a_dvp;

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	error = VOP_ACCESS(dvp, VEXEC, cred, td);
	if (error)
		return (error);

	error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
	if (error == 0)
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
	if (error == -1)
		return (0);
	return (error);
}

/*
 * XXX All of these sysctls would probably be more productive dead.
 */
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
   "Disable the getcwd syscall");

/* Implementation of the getcwd syscall. */
int
sys___getcwd(struct thread *td, struct __getcwd_args *uap)
{

	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen,
	    MAXPATHLEN));
}

1861 int
1862 kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, u_int buflen,
1863     u_int path_max)
1864 {
1865 	char *bp, *tmpbuf;
1866 	struct filedesc *fdp;
1867 	struct vnode *cdir, *rdir;
1868 	int error;
1869 
1870 	if (disablecwd)
1871 		return (ENODEV);
1872 	if (buflen < 2)
1873 		return (EINVAL);
1874 	if (buflen > path_max)
1875 		buflen = path_max;
1876 
1877 	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
1878 	fdp = td->td_proc->p_fd;
1879 	FILEDESC_SLOCK(fdp);
1880 	cdir = fdp->fd_cdir;
1881 	VREF(cdir);
1882 	rdir = fdp->fd_rdir;
1883 	VREF(rdir);
1884 	FILEDESC_SUNLOCK(fdp);
1885 	error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen);
1886 	vrele(rdir);
1887 	vrele(cdir);
1888 
1889 	if (!error) {
1890 		if (bufseg == UIO_SYSSPACE)
1891 			bcopy(bp, buf, strlen(bp) + 1);
1892 		else
1893 			error = copyout(bp, buf, strlen(bp) + 1);
1894 #ifdef KTRACE
1895 		if (KTRPOINT(curthread, KTR_NAMEI))
1896 			ktrnamei(bp);
1897 #endif
1898 	}
1899 	free(tmpbuf, M_TEMP);
1900 	return (error);
1901 }
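
/*
 * Illustrative sketch, not part of this file: in-kernel consumers can pass
 * UIO_SYSSPACE so the result is bcopy()ed into a kernel buffer instead of
 * copyout()ed to userspace.  "xxx_print_cwd" is a hypothetical consumer.
 */
#if 0
static int
xxx_print_cwd(struct thread *td)
{
	char *path;
	int error;

	path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	error = kern___getcwd(td, path, UIO_SYSSPACE, MAXPATHLEN, MAXPATHLEN);
	if (error == 0)
		printf("cwd: %s\n", path);
	free(path, M_TEMP);
	return (error);
}
#endif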
1902 
1903 /*
1904  * Thus begins the fullpath magic.
1905  */
1906 
1907 static int disablefullpath;
1908 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
1909     "Disable the vn_fullpath function");
1910 
1911 /*
1912  * Retrieve the full filesystem path that corresponds to a vnode from the
1913  * name cache (if available).
1914  */
1915 int
1916 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
1917 {
1918 	char *buf;
1919 	struct filedesc *fdp;
1920 	struct vnode *rdir;
1921 	int error;
1922 
1923 	if (disablefullpath)
1924 		return (ENODEV);
1925 	if (vn == NULL)
1926 		return (EINVAL);
1927 
1928 	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
1929 	fdp = td->td_proc->p_fd;
1930 	FILEDESC_SLOCK(fdp);
1931 	rdir = fdp->fd_rdir;
1932 	VREF(rdir);
1933 	FILEDESC_SUNLOCK(fdp);
1934 	error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN);
1935 	vrele(rdir);
1936 
1937 	if (!error)
1938 		*freebuf = buf;
1939 	else
1940 		free(buf, M_TEMP);
1941 	return (error);
1942 }
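
/*
 * Illustrative sketch, not part of this file: on success vn_fullpath()
 * returns two pointers, *retbuf pointing at the path inside the allocation
 * and *freebuf being what must be released with free(9) and M_TEMP;
 * vn_fullpath_global() below follows the same contract.
 * "xxx_log_vnode_path" is a hypothetical consumer.
 */
#if 0
static void
xxx_log_vnode_path(struct thread *td, struct vnode *vp)
{
	char *fullpath, *freepath;

	fullpath = freepath = NULL;
	if (vn_fullpath(td, vp, &fullpath, &freepath) == 0)
		printf("vnode %p resolves to %s\n", vp, fullpath);
	if (freepath != NULL)
		free(freepath, M_TEMP);
}
#endif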
1943 
1944 /*
1945  * This function is similar to vn_fullpath, but it attempts to look up the
1946  * pathname relative to the global root mount point.  This is required for the
1947  * auditing sub-system, as audited pathnames must be absolute, relative to the
1948  * global root mount point.
1949  */
1950 int
1951 vn_fullpath_global(struct thread *td, struct vnode *vn,
1952     char **retbuf, char **freebuf)
1953 {
1954 	char *buf;
1955 	int error;
1956 
1957 	if (disablefullpath)
1958 		return (ENODEV);
1959 	if (vn == NULL)
1960 		return (EINVAL);
1961 	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
1962 	error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
1963 	if (!error)
1964 		*freebuf = buf;
1965 	else
1966 		free(buf, M_TEMP);
1967 	return (error);
1968 }
1969 
1970 int
1971 vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)
1972 {
1973 	struct vnode *dvp;
1974 	struct namecache *ncp;
1975 	struct mtx *vlp;
1976 	int error;
1977 
1978 	vlp = VP2VNODELOCK(*vp);
1979 	mtx_lock(vlp);
1980 	TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) {
1981 		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
1982 			break;
1983 	}
1984 	if (ncp != NULL) {
1985 		if (*buflen < ncp->nc_nlen) {
1986 			mtx_unlock(vlp);
1987 			vrele(*vp);
1988 			counter_u64_add(numfullpathfail4, 1);
1989 			error = ENOMEM;
1990 			SDT_PROBE3(vfs, namecache, fullpath, return, error,
1991 			    vp, NULL);
1992 			return (error);
1993 		}
1994 		*buflen -= ncp->nc_nlen;
1995 		memcpy(buf + *buflen, nc_get_name(ncp), ncp->nc_nlen);
1996 		SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp,
1997 		    nc_get_name(ncp), vp);
1998 		dvp = *vp;
1999 		*vp = ncp->nc_dvp;
2000 		vref(*vp);
2001 		mtx_unlock(vlp);
2002 		vrele(dvp);
2003 		return (0);
2004 	}
2005 	SDT_PROBE1(vfs, namecache, fullpath, miss, vp);
2006 
2007 	mtx_unlock(vlp);
2008 	vn_lock(*vp, LK_SHARED | LK_RETRY);
2009 	error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
2010 	vput(*vp);
2011 	if (error) {
2012 		counter_u64_add(numfullpathfail2, 1);
2013 		SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
2014 		return (error);
2015 	}
2016 
2017 	*vp = dvp;
2018 	if (dvp->v_iflag & VI_DOOMED) {
2019 		/* forced unmount */
2020 		vrele(dvp);
2021 		error = ENOENT;
2022 		SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
2023 		return (error);
2024 	}
2025 	/*
2026 	 * The vnode now in *vp has its use count incremented for the caller.
2027 	 */
2028 
2029 	return (0);
2030 }
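
/*
 * Illustrative sketch, not part of this file: vn_vptocnp() consumes the
 * caller's reference on *vp and replaces it with a referenced parent,
 * prepending one name at the tail of buf.  A caller walking several levels
 * loops on the shrinking *buflen, starting with its own reference held.
 * "xxx_walk_to_root" is hypothetical.
 */
#if 0
static int
xxx_walk_to_root(struct vnode **vpp, struct ucred *cred, char *buf,
    u_int *buflen)
{
	int error;

	/* Caller vref()ed *vpp; on error the reference is already gone. */
	while (*vpp != rootvnode) {
		error = vn_vptocnp(vpp, cred, buf, buflen);
		if (error != 0)
			return (error);
	}
	return (0);
}
#endif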
2031 
2032 /*
2033  * The magic behind kern___getcwd() and vn_fullpath().
2034  */
2035 static int
2036 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
2037     char *buf, char **retbuf, u_int buflen)
2038 {
2039 	int error, slash_prefixed;
2040 #ifdef KDTRACE_HOOKS
2041 	struct vnode *startvp = vp;
2042 #endif
2043 	struct vnode *vp1;
2044 
2045 	buflen--;
2046 	buf[buflen] = '\0';
2047 	error = 0;
2048 	slash_prefixed = 0;
2049 
2050 	SDT_PROBE1(vfs, namecache, fullpath, entry, vp);
2051 	counter_u64_add(numfullpathcalls, 1);
2052 	vref(vp);
2053 	if (vp->v_type != VDIR) {
2054 		error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
2055 		if (error)
2056 			return (error);
2057 		if (buflen == 0) {
2058 			vrele(vp);
2059 			return (ENOMEM);
2060 		}
2061 		buf[--buflen] = '/';
2062 		slash_prefixed = 1;
2063 	}
2064 	while (vp != rdir && vp != rootvnode) {
2065 		if (vp->v_vflag & VV_ROOT) {
2066 			if (vp->v_iflag & VI_DOOMED) {	/* forced unmount */
2067 				vrele(vp);
2068 				error = ENOENT;
2069 				SDT_PROBE3(vfs, namecache, fullpath, return,
2070 				    error, vp, NULL);
2071 				break;
2072 			}
2073 			vp1 = vp->v_mount->mnt_vnodecovered;
2074 			vref(vp1);
2075 			vrele(vp);
2076 			vp = vp1;
2077 			continue;
2078 		}
2079 		if (vp->v_type != VDIR) {
2080 			vrele(vp);
2081 			counter_u64_add(numfullpathfail1, 1);
2082 			error = ENOTDIR;
2083 			SDT_PROBE3(vfs, namecache, fullpath, return,
2084 			    error, vp, NULL);
2085 			break;
2086 		}
2087 		error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
2088 		if (error)
2089 			break;
2090 		if (buflen == 0) {
2091 			vrele(vp);
2092 			error = ENOMEM;
2093 			SDT_PROBE3(vfs, namecache, fullpath, return, error,
2094 			    startvp, NULL);
2095 			break;
2096 		}
2097 		buf[--buflen] = '/';
2098 		slash_prefixed = 1;
2099 	}
2100 	if (error)
2101 		return (error);
2102 	if (!slash_prefixed) {
2103 		if (buflen == 0) {
2104 			vrele(vp);
2105 			counter_u64_add(numfullpathfail4, 1);
2106 			SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM,
2107 			    startvp, NULL);
2108 			return (ENOMEM);
2109 		}
2110 		buf[--buflen] = '/';
2111 	}
2112 	counter_u64_add(numfullpathfound, 1);
2113 	vrele(vp);
2114 
2115 	SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, buf + buflen);
2116 	*retbuf = buf + buflen;
2117 	return (0);
2118 }
2119 
2120 struct vnode *
2121 vn_dir_dd_ino(struct vnode *vp)
2122 {
2123 	struct namecache *ncp;
2124 	struct vnode *ddvp;
2125 	struct mtx *vlp;
2126 
2127 	ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino");
2128 	vlp = VP2VNODELOCK(vp);
2129 	mtx_lock(vlp);
2130 	TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) {
2131 		if ((ncp->nc_flag & NCF_ISDOTDOT) != 0)
2132 			continue;
2133 		ddvp = ncp->nc_dvp;
2134 		vhold(ddvp);
2135 		mtx_unlock(vlp);
2136 		if (vget(ddvp, LK_SHARED | LK_NOWAIT | LK_VNHELD, curthread))
2137 			return (NULL);
2138 		return (ddvp);
2139 	}
2140 	mtx_unlock(vlp);
2141 	return (NULL);
2142 }
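
/*
 * Illustrative sketch, not part of this file: a filesystem can try
 * vn_dir_dd_ino() to resolve ".." before going to disk; the parent comes
 * back shared-locked and referenced, or NULL on a cache miss.
 * "xxx_lookup_dotdot" and "xxx_read_dotdot" are hypothetical.
 */
#if 0
static struct vnode *
xxx_lookup_dotdot(struct vnode *vp)
{
	struct vnode *ddvp;

	ddvp = vn_dir_dd_ino(vp);
	if (ddvp != NULL)
		return (ddvp);		/* namecache knew the parent */
	return (xxx_read_dotdot(vp));	/* fall back to the directory */
}
#endif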
2143 
2144 int
2145 vn_commname(struct vnode *vp, char *buf, u_int buflen)
2146 {
2147 	struct namecache *ncp;
2148 	struct mtx *vlp;
2149 	int l;
2150 
2151 	vlp = VP2VNODELOCK(vp);
2152 	mtx_lock(vlp);
2153 	TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst)
2154 		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
2155 			break;
2156 	if (ncp == NULL) {
2157 		mtx_unlock(vlp);
2158 		return (ENOENT);
2159 	}
2160 	l = min(ncp->nc_nlen, buflen - 1);
2161 	memcpy(buf, nc_get_name(ncp), l);
2162 	mtx_unlock(vlp);
2163 	buf[l] = '\0';
2164 	return (0);
2165 }
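
/*
 * Illustrative sketch, not part of this file: vn_commname() copies the
 * name under which vp is currently cached into a caller-supplied buffer,
 * truncating and NUL-terminating as needed; ENOENT means vp has no
 * (non-dotdot) entry.  "xxx_report_name" is hypothetical.
 */
#if 0
static void
xxx_report_name(struct vnode *vp)
{
	char name[256];		/* NAME_MAX + 1 */

	if (vn_commname(vp, name, sizeof(name)) == 0)
		printf("vnode %p is cached as \"%s\"\n", vp, name);
}
#endif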
2166 
2167 /* ABI compat shims for old kernel modules. */
2168 #undef cache_enter
2169 
2170 void	cache_enter(struct vnode *dvp, struct vnode *vp,
2171 	    struct componentname *cnp);
2172 
2173 void
2174 cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2175 {
2176 
2177 	cache_enter_time(dvp, vp, cnp, NULL, NULL);
2178 }
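
/*
 * Sketch of the header side of the shim, assumed to live in sys/vnode.h:
 * freshly compiled code never reaches the function above because the macro
 * rewrites the call, while old binary modules still resolve the exported
 * cache_enter symbol.
 *
 *	#define	cache_enter(dvp, vp, cnp)			\
 *		cache_enter_time(dvp, vp, cnp, NULL, NULL)
 */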
2179 
2180 /*
2181  * This function updates the path string to the vnode's full global path
2182  * and checks the size of the new path string against the pathlen argument.
2183  *
2184  * Requires a locked, referenced vnode.
2185  * The vnode is re-locked on success or ENODEV, otherwise it is unlocked.
2186  *
2187  * If the sysctl debug.disablefullpath is set, ENODEV is returned, the
2188  * vnode is left locked, and the path remains untouched.
2189  *
2190  * If vp is a directory, the call to vn_fullpath_global() always succeeds
2191  * because it falls back to the ".." lookup if the namecache lookup fails.
2192  */
2193 int
2194 vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path,
2195     u_int pathlen)
2196 {
2197 	struct nameidata nd;
2198 	struct vnode *vp1;
2199 	char *rpath, *fbuf;
2200 	int error;
2201 
2202 	ASSERT_VOP_ELOCKED(vp, __func__);
2203 
2204 	/* Return ENODEV if sysctl debug.disablefullpath==1 */
2205 	if (disablefullpath)
2206 		return (ENODEV);
2207 
2208 	/* Construct global filesystem path from vp. */
2209 	VOP_UNLOCK(vp, 0);
2210 	error = vn_fullpath_global(td, vp, &rpath, &fbuf);
2211 
2212 	if (error != 0) {
2213 		vrele(vp);
2214 		return (error);
2215 	}
2216 
2217 	if (strlen(rpath) >= pathlen) {
2218 		vrele(vp);
2219 		error = ENAMETOOLONG;
2220 		goto out;
2221 	}
2222 
2223 	/*
2224 	 * Re-lookup the vnode by path to detect a possible rename.
2225 	 * As a side effect, the vnode is relocked.
2226 	 * If the vnode was renamed, return ENOENT.
2227 	 */
2228 	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
2229 	    UIO_SYSSPACE, path, td);
2230 	error = namei(&nd);
2231 	if (error != 0) {
2232 		vrele(vp);
2233 		goto out;
2234 	}
2235 	NDFREE(&nd, NDF_ONLY_PNBUF);
2236 	vp1 = nd.ni_vp;
2237 	vrele(vp);
2238 	if (vp1 == vp)
2239 		strcpy(path, rpath);
2240 	else {
2241 		vput(vp1);
2242 		error = ENOENT;
2243 	}
2244 
2245 out:
2246 	free(fbuf, M_TEMP);
2247 	return (error);
2248 }
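
/*
 * Illustrative sketch, not part of this file: a caller holding an
 * exclusively locked, referenced vnode can rewrite a path buffer in place.
 * Per the contract above, the vnode stays locked on success and ENODEV; on
 * the other error paths this revision also drops the caller's reference
 * (note the vrele() calls).  "xxx_canonicalize" is hypothetical.
 */
#if 0
static int
xxx_canonicalize(struct thread *td, struct vnode *vp, char *path)
{
	int error;

	ASSERT_VOP_ELOCKED(vp, __func__);
	error = vn_path_to_global_path(td, vp, path, MAXPATHLEN);
	if (error != 0 && error != ENODEV)
		return (error);	/* vp is unlocked and released */
	/* vp is locked here; path holds the global path on success. */
	return (0);
}
#endif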
2249