xref: /illumos-gate/usr/src/uts/common/fs/dnlc.c (revision f012ee0c3db17469b492c2cf757226f3d7b1ebbc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2017 by Delphix. All rights reserved.
24  */
25 
26 /*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
27 /*	  All Rights Reserved  	*/
28 
29 /*
30  * University Copyright- Copyright (c) 1982, 1986, 1988
31  * The Regents of the University of California
32  * All Rights Reserved
33  *
34  * University Acknowledgment- Portions of this document are derived from
35  * software developed by the University of California, Berkeley, and its
36  * contributors.
37  */
38 
39 #include <sys/types.h>
40 #include <sys/systm.h>
41 #include <sys/param.h>
42 #include <sys/t_lock.h>
43 #include <sys/systm.h>
44 #include <sys/vfs.h>
45 #include <sys/vnode.h>
46 #include <sys/dnlc.h>
47 #include <sys/kmem.h>
48 #include <sys/cmn_err.h>
49 #include <sys/vtrace.h>
50 #include <sys/bitmap.h>
51 #include <sys/var.h>
52 #include <sys/sysmacros.h>
53 #include <sys/kstat.h>
54 #include <sys/atomic.h>
55 #include <sys/taskq.h>
56 
57 /*
58  * Directory name lookup cache.
59  * Based on code originally done by Robert Elz at Melbourne.
60  *
61  * Names found by directory scans are retained in a cache
62  * for future reference.  Each hash chain is ordered by LRU
63  * Cache is indexed by hash value obtained from (vp, name)
64  * where the vp refers to the directory containing the name.
65  */
66 
67 /*
68  * We want to be able to identify files that are referenced only by the DNLC.
69  * When adding a reference from the DNLC, call VN_HOLD_DNLC instead of VN_HOLD,
70  * since multiple DNLC references should only be counted once in v_count. This
71  * file contains only two(2) calls to VN_HOLD, renamed VN_HOLD_CALLER in the
72  * hope that no one will mistakenly add a VN_HOLD to this file. (Unfortunately
73  * it is not possible to #undef VN_HOLD and retain VN_HOLD_CALLER. Ideally a
74  * Makefile rule would grep uncommented C tokens to check that VN_HOLD is
75  * referenced only once in this file, to define VN_HOLD_CALLER.)
76  */
77 #define	VN_HOLD_CALLER	VN_HOLD
78 #define	VN_HOLD_DNLC(vp)	{	\
79 	mutex_enter(&(vp)->v_lock);	\
80 	if ((vp)->v_count_dnlc == 0) {	\
81 		VN_HOLD_LOCKED(vp);	\
82 	}				\
83 	(vp)->v_count_dnlc++;		\
84 	mutex_exit(&(vp)->v_lock);	\
85 }
86 #define	VN_RELE_DNLC(vp)	{	\
87 	vn_rele_dnlc(vp);		\
88 }
89 
90 /*
91  * Tunable nc_hashavelen is the average length desired for this chain, from
92  * which the size of the nc_hash table is derived at create time.
93  */
94 #define	NC_HASHAVELEN_DEFAULT	4
95 int nc_hashavelen = NC_HASHAVELEN_DEFAULT;
96 
97 /*
98  * NC_MOVETOFRONT is the move-to-front threshold: if the hash lookup
99  * depth exceeds this value, we move the looked-up entry to the front of
100  * its hash chain.  The idea is to make sure that the most frequently
101  * accessed entries are found most quickly (by keeping them near the
102  * front of their hash chains).
103  */
104 #define	NC_MOVETOFRONT	2
105 
106 /*
107  *
108  * DNLC_MAX_RELE is used to size an array on the stack when releasing
109  * vnodes. This array is used rather than calling VN_RELE() inline because
110  * all dnlc locks must be dropped by that time in order to avoid a
111  * possible deadlock. This deadlock occurs when the dnlc holds the last
112  * reference to the vnode and so the VOP_INACTIVE vector is called which
113  * can in turn call back into the dnlc. A global array was used but had
114  * many problems:
115  *	1) Actually doesn't have an upper bound on the array size as
116  *	   entries can be added after starting the purge.
117  *	2) The locking scheme causes a hang.
118  *	3) Caused serialisation on the global lock.
119  *	4) The array was often unnecessarily huge.
120  *
121  * Note the current value 8 allows up to 4 cache entries (to be purged
122  * from each hash chain), before having to cycle around and retry.
123  * This ought to be ample given that nc_hashavelen is typically very small.
124  */
125 #define	DNLC_MAX_RELE	8 /* must be even */
126 
127 /*
128  * Hash table of name cache entries for fast lookup, dynamically
129  * allocated at startup.
130  */
131 nc_hash_t *nc_hash;
132 
133 /*
134  * Rotors. Used to select entries on a round-robin basis.
135  */
136 static nc_hash_t *dnlc_purge_fs1_rotor;
137 static nc_hash_t *dnlc_free_rotor;
138 
139 /*
140  * # of dnlc entries (uninitialized)
141  *
142  * the initial value was chosen as being
143  * a random string of bits, probably not
144  * normally chosen by a systems administrator
145  */
146 int ncsize = -1;
147 volatile uint32_t dnlc_nentries = 0;	/* current num of name cache entries */
148 static int nc_hashsz;			/* size of hash table */
149 static int nc_hashmask;			/* size of hash table minus 1 */
150 
151 /*
152  * The dnlc_reduce_cache() taskq queue is activated when there are
153  * ncsize name cache entries and if no parameter is provided, it reduces
154  * the size down to dnlc_nentries_low_water, which is by default one
155  * hundreth less (or 99%) of ncsize.
156  *
157  * If a parameter is provided to dnlc_reduce_cache(), then we reduce
158  * the size down based on ncsize_onepercent - where ncsize_onepercent
159  * is 1% of ncsize; however, we never let dnlc_reduce_cache() reduce
160  * the size below 3% of ncsize (ncsize_min_percent).
161  */
162 #define	DNLC_LOW_WATER_DIVISOR_DEFAULT 100
163 uint_t dnlc_low_water_divisor = DNLC_LOW_WATER_DIVISOR_DEFAULT;
164 uint_t dnlc_nentries_low_water;
165 int dnlc_reduce_idle = 1; /* no locking needed */
166 uint_t ncsize_onepercent;
167 uint_t ncsize_min_percent;
168 
169 /*
170  * If dnlc_nentries hits dnlc_max_nentries (twice ncsize)
171  * then this means the dnlc_reduce_cache() taskq is failing to
172  * keep up. In this case we refuse to add new entries to the dnlc
173  * until the taskq catches up.
174  */
175 uint_t dnlc_max_nentries; /* twice ncsize */
176 uint64_t dnlc_max_nentries_cnt = 0; /* statistic on times we failed */
177 
178 /*
179  * Tunable to define when we should just remove items from
180  * the end of the chain.
181  */
182 #define	DNLC_LONG_CHAIN 8
183 uint_t dnlc_long_chain = DNLC_LONG_CHAIN;
184 
185 /*
186  * ncstats has been deprecated, due to the integer size of the counters
187  * which can easily overflow in the dnlc.
188  * It is maintained (at some expense) for compatability.
189  * The preferred interface is the kstat accessible nc_stats below.
190  */
191 struct ncstats ncstats;
192 
193 struct nc_stats ncs = {
194 	{ "hits",			KSTAT_DATA_UINT64 },
195 	{ "misses",			KSTAT_DATA_UINT64 },
196 	{ "negative_cache_hits",	KSTAT_DATA_UINT64 },
197 	{ "enters",			KSTAT_DATA_UINT64 },
198 	{ "double_enters",		KSTAT_DATA_UINT64 },
199 	{ "purge_total_entries",	KSTAT_DATA_UINT64 },
200 	{ "purge_all",			KSTAT_DATA_UINT64 },
201 	{ "purge_vp",			KSTAT_DATA_UINT64 },
202 	{ "purge_vfs",			KSTAT_DATA_UINT64 },
203 	{ "purge_fs1",			KSTAT_DATA_UINT64 },
204 	{ "pick_free",			KSTAT_DATA_UINT64 },
205 	{ "pick_heuristic",		KSTAT_DATA_UINT64 },
206 	{ "pick_last",			KSTAT_DATA_UINT64 },
207 
208 	/* directory caching stats */
209 
210 	{ "dir_hits",			KSTAT_DATA_UINT64 },
211 	{ "dir_misses",			KSTAT_DATA_UINT64 },
212 	{ "dir_cached_current",		KSTAT_DATA_UINT64 },
213 	{ "dir_entries_cached_current",	KSTAT_DATA_UINT64 },
214 	{ "dir_cached_total",		KSTAT_DATA_UINT64 },
215 	{ "dir_start_no_memory",	KSTAT_DATA_UINT64 },
216 	{ "dir_add_no_memory",		KSTAT_DATA_UINT64 },
217 	{ "dir_add_abort",		KSTAT_DATA_UINT64 },
218 	{ "dir_add_max",		KSTAT_DATA_UINT64 },
219 	{ "dir_remove_entry_fail",	KSTAT_DATA_UINT64 },
220 	{ "dir_remove_space_fail",	KSTAT_DATA_UINT64 },
221 	{ "dir_update_fail",		KSTAT_DATA_UINT64 },
222 	{ "dir_fini_purge",		KSTAT_DATA_UINT64 },
223 	{ "dir_reclaim_last",		KSTAT_DATA_UINT64 },
224 	{ "dir_reclaim_any",		KSTAT_DATA_UINT64 },
225 };
226 
227 static int doingcache = 1;
228 
229 vnode_t negative_cache_vnode;
230 
231 /*
232  * Insert entry at the front of the queue
233  */
234 #define	nc_inshash(ncp, hp) \
235 { \
236 	(ncp)->hash_next = (hp)->hash_next; \
237 	(ncp)->hash_prev = (ncache_t *)(hp); \
238 	(hp)->hash_next->hash_prev = (ncp); \
239 	(hp)->hash_next = (ncp); \
240 }
241 
242 /*
243  * Remove entry from hash queue
244  */
245 #define	nc_rmhash(ncp) \
246 { \
247 	(ncp)->hash_prev->hash_next = (ncp)->hash_next; \
248 	(ncp)->hash_next->hash_prev = (ncp)->hash_prev; \
249 	(ncp)->hash_prev = NULL; \
250 	(ncp)->hash_next = NULL; \
251 }
252 
253 /*
254  * Free an entry.
255  */
256 #define	dnlc_free(ncp) \
257 { \
258 	kmem_free((ncp), sizeof (ncache_t) + (ncp)->namlen); \
259 	atomic_dec_32(&dnlc_nentries); \
260 }
261 
262 
263 /*
264  * Cached directory info.
265  * ======================
266  */
267 
268 /*
269  * Cached directory free space hash function.
270  * Needs the free space handle and the dcp to get the hash table size
271  * Returns the hash index.
272  */
273 #define	DDFHASH(handle, dcp) ((handle >> 2) & (dcp)->dc_fhash_mask)
274 
275 /*
276  * Cached directory name entry hash function.
277  * Uses the name and returns in the input arguments the hash and the name
278  * length.
279  */
280 #define	DNLC_DIR_HASH(name, hash, namelen)			\
281 	{							\
282 		char Xc;					\
283 		const char *Xcp;				\
284 		hash = *name;					\
285 		for (Xcp = (name + 1); (Xc = *Xcp) != 0; Xcp++)	\
286 			hash = (hash << 4) + hash + Xc;		\
287 		ASSERT((Xcp - (name)) <= ((1 << NBBY) - 1));	\
288 		namelen = Xcp - (name);				\
289 	}
290 
291 /* special dircache_t pointer to indicate error should be returned */
292 /*
293  * The anchor directory cache pointer can contain 3 types of values,
294  * 1) NULL: No directory cache
295  * 2) DC_RET_LOW_MEM (-1): There was a directory cache that found to be
296  *    too big or a memory shortage occurred. This value remains in the
297  *    pointer until a dnlc_dir_start() which returns the a DNOMEM error.
298  *    This is kludgy but efficient and only visible in this source file.
299  * 3) A valid cache pointer.
300  */
301 #define	DC_RET_LOW_MEM (dircache_t *)1
302 #define	VALID_DIR_CACHE(dcp) ((dircache_t *)(dcp) > DC_RET_LOW_MEM)
303 
304 /* Tunables */
305 uint_t dnlc_dir_enable = 1; /* disable caching directories by setting to 0 */
306 uint_t dnlc_dir_min_size = 40; /* min no of directory entries before caching */
307 uint_t dnlc_dir_max_size = UINT_MAX; /* ditto maximum */
308 uint_t dnlc_dir_hash_size_shift = 3; /* 8 entries per hash bucket */
309 uint_t dnlc_dir_min_reclaim =  350000; /* approx 1MB of dcentrys */
310 /*
311  * dnlc_dir_hash_resize_shift determines when the hash tables
312  * get re-adjusted due to growth or shrinkage
313  * - currently 2 indicating that there can be at most 4
314  * times or at least one quarter the number of entries
315  * before hash table readjustment. Note that with
316  * dnlc_dir_hash_size_shift above set at 3 this would
317  * mean readjustment would occur if the average number
318  * of entries went above 32 or below 2
319  */
320 uint_t dnlc_dir_hash_resize_shift = 2; /* readjust rate */
321 
322 static kmem_cache_t *dnlc_dir_space_cache; /* free space entry cache */
323 static dchead_t dc_head; /* anchor of cached directories */
324 
325 /* Prototypes */
326 static ncache_t *dnlc_get(uchar_t namlen);
327 static ncache_t *dnlc_search(vnode_t *dp, const char *name, uchar_t namlen,
328     int hash);
329 static void dnlc_dir_reclaim(void *unused);
330 static void dnlc_dir_abort(dircache_t *dcp);
331 static void dnlc_dir_adjust_fhash(dircache_t *dcp);
332 static void dnlc_dir_adjust_nhash(dircache_t *dcp);
333 static void do_dnlc_reduce_cache(void *);
334 
335 
336 /*
337  * Initialize the directory cache.
338  */
339 void
340 dnlc_init()
341 {
342 	nc_hash_t *hp;
343 	kstat_t *ksp;
344 	int i;
345 
346 	/*
347 	 * Set up the size of the dnlc (ncsize) and its low water mark.
348 	 */
349 	if (ncsize == -1) {
350 		/* calculate a reasonable size for the low water */
351 		dnlc_nentries_low_water = 4 * (v.v_proc + maxusers) + 320;
352 		ncsize = dnlc_nentries_low_water +
353 		    (dnlc_nentries_low_water / dnlc_low_water_divisor);
354 	} else {
355 		/* don't change the user specified ncsize */
356 		dnlc_nentries_low_water =
357 		    ncsize - (ncsize / dnlc_low_water_divisor);
358 	}
359 	if (ncsize <= 0) {
360 		doingcache = 0;
361 		dnlc_dir_enable = 0; /* also disable directory caching */
362 		ncsize = 0;
363 		cmn_err(CE_NOTE, "name cache (dnlc) disabled");
364 		return;
365 	}
366 	dnlc_max_nentries = ncsize * 2;
367 	ncsize_onepercent = ncsize / 100;
368 	ncsize_min_percent = ncsize_onepercent * 3;
369 
370 	/*
371 	 * Initialise the hash table.
372 	 * Compute hash size rounding to the next power of two.
373 	 */
374 	nc_hashsz = ncsize / nc_hashavelen;
375 	nc_hashsz = 1 << highbit(nc_hashsz);
376 	nc_hashmask = nc_hashsz - 1;
377 	nc_hash = kmem_zalloc(nc_hashsz * sizeof (*nc_hash), KM_SLEEP);
378 	for (i = 0; i < nc_hashsz; i++) {
379 		hp = (nc_hash_t *)&nc_hash[i];
380 		mutex_init(&hp->hash_lock, NULL, MUTEX_DEFAULT, NULL);
381 		hp->hash_next = (ncache_t *)hp;
382 		hp->hash_prev = (ncache_t *)hp;
383 	}
384 
385 	/*
386 	 * Initialize rotors
387 	 */
388 	dnlc_free_rotor = dnlc_purge_fs1_rotor = &nc_hash[0];
389 
390 	/*
391 	 * Set up the directory caching to use kmem_cache_alloc
392 	 * for its free space entries so that we can get a callback
393 	 * when the system is short on memory, to allow us to free
394 	 * up some memory. we don't use the constructor/deconstructor
395 	 * functions.
396 	 */
397 	dnlc_dir_space_cache = kmem_cache_create("dnlc_space_cache",
398 	    sizeof (dcfree_t), 0, NULL, NULL, dnlc_dir_reclaim, NULL,
399 	    NULL, 0);
400 
401 	/*
402 	 * Initialise the head of the cached directory structures
403 	 */
404 	mutex_init(&dc_head.dch_lock, NULL, MUTEX_DEFAULT, NULL);
405 	dc_head.dch_next = (dircache_t *)&dc_head;
406 	dc_head.dch_prev = (dircache_t *)&dc_head;
407 
408 	/*
409 	 * Put a hold on the negative cache vnode so that it never goes away
410 	 * (VOP_INACTIVE isn't called on it). The mutex_enter() isn't necessary
411 	 * for correctness, but VN_HOLD_LOCKED() asserts that it's held, so
412 	 * we oblige.
413 	 */
414 	mutex_enter(&negative_cache_vnode.v_lock);
415 	negative_cache_vnode.v_count = 0;
416 	VN_HOLD_LOCKED(&negative_cache_vnode);
417 	negative_cache_vnode.v_count_dnlc = 0;
418 	mutex_exit(&negative_cache_vnode.v_lock);
419 
420 	/*
421 	 * Initialise kstats - both the old compatability raw kind and
422 	 * the more extensive named stats.
423 	 */
424 	ksp = kstat_create("unix", 0, "ncstats", "misc", KSTAT_TYPE_RAW,
425 	    sizeof (struct ncstats), KSTAT_FLAG_VIRTUAL);
426 	if (ksp) {
427 		ksp->ks_data = (void *) &ncstats;
428 		kstat_install(ksp);
429 	}
430 	ksp = kstat_create("unix", 0, "dnlcstats", "misc", KSTAT_TYPE_NAMED,
431 	    sizeof (ncs) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
432 	if (ksp) {
433 		ksp->ks_data = (void *) &ncs;
434 		kstat_install(ksp);
435 	}
436 }
437 
438 /*
439  * Add a name to the directory cache.
440  */
441 void
442 dnlc_enter(vnode_t *dp, const char *name, vnode_t *vp)
443 {
444 	ncache_t *ncp;
445 	nc_hash_t *hp;
446 	uchar_t namlen;
447 	int hash;
448 
449 	TRACE_0(TR_FAC_NFS, TR_DNLC_ENTER_START, "dnlc_enter_start:");
450 
451 	if (!doingcache) {
452 		TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
453 		    "dnlc_enter_end:(%S) %d", "not caching", 0);
454 		return;
455 	}
456 
457 	/*
458 	 * Get a new dnlc entry. Assume the entry won't be in the cache
459 	 * and initialize it now
460 	 */
461 	DNLCHASH(name, dp, hash, namlen);
462 	if ((ncp = dnlc_get(namlen)) == NULL)
463 		return;
464 	ncp->dp = dp;
465 	VN_HOLD_DNLC(dp);
466 	ncp->vp = vp;
467 	VN_HOLD_DNLC(vp);
468 	bcopy(name, ncp->name, namlen + 1); /* name and null */
469 	ncp->hash = hash;
470 	hp = &nc_hash[hash & nc_hashmask];
471 
472 	mutex_enter(&hp->hash_lock);
473 	if (dnlc_search(dp, name, namlen, hash) != NULL) {
474 		mutex_exit(&hp->hash_lock);
475 		ncstats.dbl_enters++;
476 		ncs.ncs_dbl_enters.value.ui64++;
477 		VN_RELE_DNLC(dp);
478 		VN_RELE_DNLC(vp);
479 		dnlc_free(ncp);		/* crfree done here */
480 		TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
481 		    "dnlc_enter_end:(%S) %d", "dbl enter", ncstats.dbl_enters);
482 		return;
483 	}
484 	/*
485 	 * Insert back into the hash chain.
486 	 */
487 	nc_inshash(ncp, hp);
488 	mutex_exit(&hp->hash_lock);
489 	ncstats.enters++;
490 	ncs.ncs_enters.value.ui64++;
491 	TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
492 	    "dnlc_enter_end:(%S) %d", "done", ncstats.enters);
493 }
494 
495 /*
496  * Add a name to the directory cache.
497  *
498  * This function is basically identical with
499  * dnlc_enter().  The difference is that when the
500  * desired dnlc entry is found, the vnode in the
501  * ncache is compared with the vnode passed in.
502  *
503  * If they are not equal then the ncache is
504  * updated with the passed in vnode.  Otherwise
505  * it just frees up the newly allocated dnlc entry.
506  */
507 void
508 dnlc_update(vnode_t *dp, const char *name, vnode_t *vp)
509 {
510 	ncache_t *ncp;
511 	ncache_t *tcp;
512 	vnode_t *tvp;
513 	nc_hash_t *hp;
514 	int hash;
515 	uchar_t namlen;
516 
517 	TRACE_0(TR_FAC_NFS, TR_DNLC_ENTER_START, "dnlc_update_start:");
518 
519 	if (!doingcache) {
520 		TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
521 		    "dnlc_update_end:(%S) %d", "not caching", 0);
522 		return;
523 	}
524 
525 	/*
526 	 * Get a new dnlc entry and initialize it now.
527 	 * If we fail to get a new entry, call dnlc_remove() to purge
528 	 * any existing dnlc entry including negative cache (DNLC_NO_VNODE)
529 	 * entry.
530 	 * Failure to clear an existing entry could result in false dnlc
531 	 * lookup (negative/stale entry).
532 	 */
533 	DNLCHASH(name, dp, hash, namlen);
534 	if ((ncp = dnlc_get(namlen)) == NULL) {
535 		dnlc_remove(dp, name);
536 		return;
537 	}
538 	ncp->dp = dp;
539 	VN_HOLD_DNLC(dp);
540 	ncp->vp = vp;
541 	VN_HOLD_DNLC(vp);
542 	bcopy(name, ncp->name, namlen + 1); /* name and null */
543 	ncp->hash = hash;
544 	hp = &nc_hash[hash & nc_hashmask];
545 
546 	mutex_enter(&hp->hash_lock);
547 	if ((tcp = dnlc_search(dp, name, namlen, hash)) != NULL) {
548 		if (tcp->vp != vp) {
549 			tvp = tcp->vp;
550 			tcp->vp = vp;
551 			mutex_exit(&hp->hash_lock);
552 			VN_RELE_DNLC(tvp);
553 			ncstats.enters++;
554 			ncs.ncs_enters.value.ui64++;
555 			TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
556 			    "dnlc_update_end:(%S) %d", "done", ncstats.enters);
557 		} else {
558 			mutex_exit(&hp->hash_lock);
559 			VN_RELE_DNLC(vp);
560 			ncstats.dbl_enters++;
561 			ncs.ncs_dbl_enters.value.ui64++;
562 			TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
563 			    "dnlc_update_end:(%S) %d",
564 			    "dbl enter", ncstats.dbl_enters);
565 		}
566 		VN_RELE_DNLC(dp);
567 		dnlc_free(ncp);		/* crfree done here */
568 		return;
569 	}
570 	/*
571 	 * insert the new entry, since it is not in dnlc yet
572 	 */
573 	nc_inshash(ncp, hp);
574 	mutex_exit(&hp->hash_lock);
575 	ncstats.enters++;
576 	ncs.ncs_enters.value.ui64++;
577 	TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
578 	    "dnlc_update_end:(%S) %d", "done", ncstats.enters);
579 }
580 
581 /*
582  * Look up a name in the directory name cache.
583  *
584  * Return a doubly-held vnode if found: one hold so that it may
585  * remain in the cache for other users, the other hold so that
586  * the cache is not re-cycled and the identity of the vnode is
587  * lost before the caller can use the vnode.
588  */
589 vnode_t *
590 dnlc_lookup(vnode_t *dp, const char *name)
591 {
592 	ncache_t *ncp;
593 	nc_hash_t *hp;
594 	vnode_t *vp;
595 	int hash, depth;
596 	uchar_t namlen;
597 
598 	TRACE_2(TR_FAC_NFS, TR_DNLC_LOOKUP_START,
599 	    "dnlc_lookup_start:dp %x name %s", dp, name);
600 
601 	if (!doingcache) {
602 		TRACE_4(TR_FAC_NFS, TR_DNLC_LOOKUP_END,
603 		    "dnlc_lookup_end:%S %d vp %x name %s",
604 		    "not_caching", 0, NULL, name);
605 		return (NULL);
606 	}
607 
608 	DNLCHASH(name, dp, hash, namlen);
609 	depth = 1;
610 	hp = &nc_hash[hash & nc_hashmask];
611 	mutex_enter(&hp->hash_lock);
612 
613 	for (ncp = hp->hash_next; ncp != (ncache_t *)hp;
614 	    ncp = ncp->hash_next) {
615 		if (ncp->hash == hash &&	/* fast signature check */
616 		    ncp->dp == dp &&
617 		    ncp->namlen == namlen &&
618 		    bcmp(ncp->name, name, namlen) == 0) {
619 			/*
620 			 * Move this entry to the head of its hash chain
621 			 * if it's not already close.
622 			 */
623 			if (depth > NC_MOVETOFRONT) {
624 				ncache_t *next = ncp->hash_next;
625 				ncache_t *prev = ncp->hash_prev;
626 
627 				prev->hash_next = next;
628 				next->hash_prev = prev;
629 				ncp->hash_next = next = hp->hash_next;
630 				ncp->hash_prev = (ncache_t *)hp;
631 				next->hash_prev = ncp;
632 				hp->hash_next = ncp;
633 
634 				ncstats.move_to_front++;
635 			}
636 
637 			/*
638 			 * Put a hold on the vnode now so its identity
639 			 * can't change before the caller has a chance to
640 			 * put a hold on it.
641 			 */
642 			vp = ncp->vp;
643 			VN_HOLD_CALLER(vp); /* VN_HOLD 1 of 2 in this file */
644 			mutex_exit(&hp->hash_lock);
645 			ncstats.hits++;
646 			ncs.ncs_hits.value.ui64++;
647 			if (vp == DNLC_NO_VNODE) {
648 				ncs.ncs_neg_hits.value.ui64++;
649 			}
650 			TRACE_4(TR_FAC_NFS, TR_DNLC_LOOKUP_END,
651 			    "dnlc_lookup_end:%S %d vp %x name %s", "hit",
652 			    ncstats.hits, vp, name);
653 			return (vp);
654 		}
655 		depth++;
656 	}
657 
658 	mutex_exit(&hp->hash_lock);
659 	ncstats.misses++;
660 	ncs.ncs_misses.value.ui64++;
661 	TRACE_4(TR_FAC_NFS, TR_DNLC_LOOKUP_END,
662 	    "dnlc_lookup_end:%S %d vp %x name %s", "miss", ncstats.misses,
663 	    NULL, name);
664 	return (NULL);
665 }
666 
667 /*
668  * Remove an entry in the directory name cache.
669  */
670 void
671 dnlc_remove(vnode_t *dp, const char *name)
672 {
673 	ncache_t *ncp;
674 	nc_hash_t *hp;
675 	uchar_t namlen;
676 	int hash;
677 
678 	if (!doingcache)
679 		return;
680 	DNLCHASH(name, dp, hash, namlen);
681 	hp = &nc_hash[hash & nc_hashmask];
682 
683 	mutex_enter(&hp->hash_lock);
684 	if (ncp = dnlc_search(dp, name, namlen, hash)) {
685 		/*
686 		 * Free up the entry
687 		 */
688 		nc_rmhash(ncp);
689 		mutex_exit(&hp->hash_lock);
690 		VN_RELE_DNLC(ncp->vp);
691 		VN_RELE_DNLC(ncp->dp);
692 		dnlc_free(ncp);
693 		return;
694 	}
695 	mutex_exit(&hp->hash_lock);
696 }
697 
698 /*
699  * Purge the entire cache.
700  */
701 void
702 dnlc_purge()
703 {
704 	nc_hash_t *nch;
705 	ncache_t *ncp;
706 	int index;
707 	int i;
708 	vnode_t *nc_rele[DNLC_MAX_RELE];
709 
710 	if (!doingcache)
711 		return;
712 
713 	ncstats.purges++;
714 	ncs.ncs_purge_all.value.ui64++;
715 
716 	for (nch = nc_hash; nch < &nc_hash[nc_hashsz]; nch++) {
717 		index = 0;
718 		mutex_enter(&nch->hash_lock);
719 		ncp = nch->hash_next;
720 		while (ncp != (ncache_t *)nch) {
721 			ncache_t *np;
722 
723 			np = ncp->hash_next;
724 			nc_rele[index++] = ncp->vp;
725 			nc_rele[index++] = ncp->dp;
726 
727 			nc_rmhash(ncp);
728 			dnlc_free(ncp);
729 			ncp = np;
730 			ncs.ncs_purge_total.value.ui64++;
731 			if (index == DNLC_MAX_RELE)
732 				break;
733 		}
734 		mutex_exit(&nch->hash_lock);
735 
736 		/* Release holds on all the vnodes now that we have no locks */
737 		for (i = 0; i < index; i++) {
738 			VN_RELE_DNLC(nc_rele[i]);
739 		}
740 		if (ncp != (ncache_t *)nch) {
741 			nch--; /* Do current hash chain again */
742 		}
743 	}
744 }
745 
746 /*
747  * Purge any cache entries referencing a vnode. Exit as soon as the dnlc
748  * reference count goes to zero (the caller still holds a reference).
749  */
750 void
751 dnlc_purge_vp(vnode_t *vp)
752 {
753 	nc_hash_t *nch;
754 	ncache_t *ncp;
755 	int index;
756 	vnode_t *nc_rele[DNLC_MAX_RELE];
757 
758 	ASSERT(vp->v_count > 0);
759 	if (vp->v_count_dnlc == 0) {
760 		return;
761 	}
762 
763 	if (!doingcache)
764 		return;
765 
766 	ncstats.purges++;
767 	ncs.ncs_purge_vp.value.ui64++;
768 
769 	for (nch = nc_hash; nch < &nc_hash[nc_hashsz]; nch++) {
770 		index = 0;
771 		mutex_enter(&nch->hash_lock);
772 		ncp = nch->hash_next;
773 		while (ncp != (ncache_t *)nch) {
774 			ncache_t *np;
775 
776 			np = ncp->hash_next;
777 			if (ncp->dp == vp || ncp->vp == vp) {
778 				nc_rele[index++] = ncp->vp;
779 				nc_rele[index++] = ncp->dp;
780 				nc_rmhash(ncp);
781 				dnlc_free(ncp);
782 				ncs.ncs_purge_total.value.ui64++;
783 				if (index == DNLC_MAX_RELE) {
784 					ncp = np;
785 					break;
786 				}
787 			}
788 			ncp = np;
789 		}
790 		mutex_exit(&nch->hash_lock);
791 
792 		/* Release holds on all the vnodes now that we have no locks */
793 		while (index) {
794 			VN_RELE_DNLC(nc_rele[--index]);
795 		}
796 
797 		if (vp->v_count_dnlc == 0) {
798 			return;
799 		}
800 
801 		if (ncp != (ncache_t *)nch) {
802 			nch--; /* Do current hash chain again */
803 		}
804 	}
805 }
806 
807 /*
808  * Purge cache entries referencing a vfsp.  Caller supplies a count
809  * of entries to purge; up to that many will be freed.  A count of
810  * zero indicates that all such entries should be purged.  Returns
811  * the number of entries that were purged.
812  */
813 int
814 dnlc_purge_vfsp(vfs_t *vfsp, int count)
815 {
816 	nc_hash_t *nch;
817 	ncache_t *ncp;
818 	int n = 0;
819 	int index;
820 	int i;
821 	vnode_t *nc_rele[DNLC_MAX_RELE];
822 
823 	if (!doingcache)
824 		return (0);
825 
826 	ncstats.purges++;
827 	ncs.ncs_purge_vfs.value.ui64++;
828 
829 	for (nch = nc_hash; nch < &nc_hash[nc_hashsz]; nch++) {
830 		index = 0;
831 		mutex_enter(&nch->hash_lock);
832 		ncp = nch->hash_next;
833 		while (ncp != (ncache_t *)nch) {
834 			ncache_t *np;
835 
836 			np = ncp->hash_next;
837 			ASSERT(ncp->dp != NULL);
838 			ASSERT(ncp->vp != NULL);
839 			if ((ncp->dp->v_vfsp == vfsp) ||
840 			    (ncp->vp->v_vfsp == vfsp)) {
841 				n++;
842 				nc_rele[index++] = ncp->vp;
843 				nc_rele[index++] = ncp->dp;
844 				nc_rmhash(ncp);
845 				dnlc_free(ncp);
846 				ncs.ncs_purge_total.value.ui64++;
847 				if (index == DNLC_MAX_RELE) {
848 					ncp = np;
849 					break;
850 				}
851 				if (count != 0 && n >= count) {
852 					break;
853 				}
854 			}
855 			ncp = np;
856 		}
857 		mutex_exit(&nch->hash_lock);
858 		/* Release holds on all the vnodes now that we have no locks */
859 		for (i = 0; i < index; i++) {
860 			VN_RELE_DNLC(nc_rele[i]);
861 		}
862 		if (count != 0 && n >= count) {
863 			return (n);
864 		}
865 		if (ncp != (ncache_t *)nch) {
866 			nch--; /* Do current hash chain again */
867 		}
868 	}
869 	return (n);
870 }
871 
872 /*
873  * Purge 1 entry from the dnlc that is part of the filesystem(s)
874  * represented by 'vop'. The purpose of this routine is to allow
875  * users of the dnlc to free a vnode that is being held by the dnlc.
876  *
877  * If we find a vnode that we release which will result in
878  * freeing the underlying vnode (count was 1), return 1, 0
879  * if no appropriate vnodes found.
880  *
881  * Note, vop is not the 'right' identifier for a filesystem.
882  */
883 int
884 dnlc_fs_purge1(vnodeops_t *vop)
885 {
886 	nc_hash_t *end;
887 	nc_hash_t *hp;
888 	ncache_t *ncp;
889 	vnode_t *vp;
890 
891 	if (!doingcache)
892 		return (0);
893 
894 	ncs.ncs_purge_fs1.value.ui64++;
895 
896 	/*
897 	 * Scan the dnlc entries looking for a likely candidate.
898 	 */
899 	hp = end = dnlc_purge_fs1_rotor;
900 
901 	do {
902 		if (++hp == &nc_hash[nc_hashsz])
903 			hp = nc_hash;
904 		dnlc_purge_fs1_rotor = hp;
905 		if (hp->hash_next == (ncache_t *)hp)
906 			continue;
907 		mutex_enter(&hp->hash_lock);
908 		for (ncp = hp->hash_prev;
909 		    ncp != (ncache_t *)hp;
910 		    ncp = ncp->hash_prev) {
911 			vp = ncp->vp;
912 			if (!vn_has_cached_data(vp) && (vp->v_count == 1) &&
913 			    vn_matchops(vp, vop))
914 				break;
915 		}
916 		if (ncp != (ncache_t *)hp) {
917 			nc_rmhash(ncp);
918 			mutex_exit(&hp->hash_lock);
919 			VN_RELE_DNLC(ncp->dp);
920 			VN_RELE_DNLC(vp)
921 			dnlc_free(ncp);
922 			ncs.ncs_purge_total.value.ui64++;
923 			return (1);
924 		}
925 		mutex_exit(&hp->hash_lock);
926 	} while (hp != end);
927 	return (0);
928 }
929 
930 /*
931  * Perform a reverse lookup in the DNLC.  This will find the first occurrence of
932  * the vnode.  If successful, it will return the vnode of the parent, and the
933  * name of the entry in the given buffer.  If it cannot be found, or the buffer
934  * is too small, then it will return NULL.  Note that this is a highly
935  * inefficient function, since the DNLC is constructed solely for forward
936  * lookups.
937  */
938 vnode_t *
939 dnlc_reverse_lookup(vnode_t *vp, char *buf, size_t buflen)
940 {
941 	nc_hash_t *nch;
942 	ncache_t *ncp;
943 	vnode_t *pvp;
944 
945 	if (!doingcache)
946 		return (NULL);
947 
948 	for (nch = nc_hash; nch < &nc_hash[nc_hashsz]; nch++) {
949 		mutex_enter(&nch->hash_lock);
950 		ncp = nch->hash_next;
951 		while (ncp != (ncache_t *)nch) {
952 			/*
953 			 * We ignore '..' entries since it can create
954 			 * confusion and infinite loops.
955 			 */
956 			if (ncp->vp == vp && !(ncp->namlen == 2 &&
957 			    0 == bcmp(ncp->name, "..", 2)) &&
958 			    ncp->namlen < buflen) {
959 				bcopy(ncp->name, buf, ncp->namlen);
960 				buf[ncp->namlen] = '\0';
961 				pvp = ncp->dp;
962 				/* VN_HOLD 2 of 2 in this file */
963 				VN_HOLD_CALLER(pvp);
964 				mutex_exit(&nch->hash_lock);
965 				return (pvp);
966 			}
967 			ncp = ncp->hash_next;
968 		}
969 		mutex_exit(&nch->hash_lock);
970 	}
971 
972 	return (NULL);
973 }
974 /*
975  * Utility routine to search for a cache entry. Return the
976  * ncache entry if found, NULL otherwise.
977  */
978 static ncache_t *
979 dnlc_search(vnode_t *dp, const char *name, uchar_t namlen, int hash)
980 {
981 	nc_hash_t *hp;
982 	ncache_t *ncp;
983 
984 	hp = &nc_hash[hash & nc_hashmask];
985 
986 	for (ncp = hp->hash_next; ncp != (ncache_t *)hp; ncp = ncp->hash_next) {
987 		if (ncp->hash == hash &&
988 		    ncp->dp == dp &&
989 		    ncp->namlen == namlen &&
990 		    bcmp(ncp->name, name, namlen) == 0)
991 			return (ncp);
992 	}
993 	return (NULL);
994 }
995 
996 #if ((1 << NBBY) - 1) < (MAXNAMELEN - 1)
997 #error ncache_t name length representation is too small
998 #endif
999 
1000 void
1001 dnlc_reduce_cache(void *reduce_percent)
1002 {
1003 	if (dnlc_reduce_idle && (dnlc_nentries >= ncsize || reduce_percent)) {
1004 		dnlc_reduce_idle = 0;
1005 		if ((taskq_dispatch(system_taskq, do_dnlc_reduce_cache,
1006 		    reduce_percent, TQ_NOSLEEP)) == NULL)
1007 			dnlc_reduce_idle = 1;
1008 	}
1009 }
1010 
1011 /*
1012  * Get a new name cache entry.
1013  * If the dnlc_reduce_cache() taskq isn't keeping up with demand, or memory
1014  * is short then just return NULL. If we're over ncsize then kick off a
1015  * thread to free some in use entries down to dnlc_nentries_low_water.
1016  * Caller must initialise all fields except namlen.
1017  * Component names are defined to be less than MAXNAMELEN
1018  * which includes a null.
1019  */
1020 static ncache_t *
1021 dnlc_get(uchar_t namlen)
1022 {
1023 	ncache_t *ncp;
1024 
1025 	if (dnlc_nentries > dnlc_max_nentries) {
1026 		dnlc_max_nentries_cnt++; /* keep a statistic */
1027 		return (NULL);
1028 	}
1029 	ncp = kmem_alloc(sizeof (ncache_t) + namlen, KM_NOSLEEP);
1030 	if (ncp == NULL) {
1031 		return (NULL);
1032 	}
1033 	ncp->namlen = namlen;
1034 	atomic_inc_32(&dnlc_nentries);
1035 	dnlc_reduce_cache(NULL);
1036 	return (ncp);
1037 }
1038 
1039 /*
1040  * Taskq routine to free up name cache entries to reduce the
1041  * cache size to the low water mark if "reduce_percent" is not provided.
1042  * If "reduce_percent" is provided, reduce cache size by
1043  * (ncsize_onepercent * reduce_percent).
1044  */
1045 /*ARGSUSED*/
1046 static void
1047 do_dnlc_reduce_cache(void *reduce_percent)
1048 {
1049 	nc_hash_t *hp = dnlc_free_rotor, *start_hp = hp;
1050 	vnode_t *vp;
1051 	ncache_t *ncp;
1052 	int cnt;
1053 	uint_t low_water = dnlc_nentries_low_water;
1054 
1055 	if (reduce_percent) {
1056 		uint_t reduce_cnt;
1057 
1058 		/*
1059 		 * Never try to reduce the current number
1060 		 * of cache entries below 3% of ncsize.
1061 		 */
1062 		if (dnlc_nentries <= ncsize_min_percent) {
1063 			dnlc_reduce_idle = 1;
1064 			return;
1065 		}
1066 		reduce_cnt = ncsize_onepercent *
1067 		    (uint_t)(uintptr_t)reduce_percent;
1068 
1069 		if (reduce_cnt > dnlc_nentries ||
1070 		    dnlc_nentries - reduce_cnt < ncsize_min_percent)
1071 			low_water = ncsize_min_percent;
1072 		else
1073 			low_water = dnlc_nentries - reduce_cnt;
1074 	}
1075 
1076 	do {
1077 		/*
1078 		 * Find the first non empty hash queue without locking.
1079 		 * Only look at each hash queue once to avoid an infinite loop.
1080 		 */
1081 		do {
1082 			if (++hp == &nc_hash[nc_hashsz])
1083 				hp = nc_hash;
1084 		} while (hp->hash_next == (ncache_t *)hp && hp != start_hp);
1085 
1086 		/* return if all hash queues are empty. */
1087 		if (hp->hash_next == (ncache_t *)hp) {
1088 			dnlc_reduce_idle = 1;
1089 			return;
1090 		}
1091 
1092 		mutex_enter(&hp->hash_lock);
1093 		for (cnt = 0, ncp = hp->hash_prev; ncp != (ncache_t *)hp;
1094 		    ncp = ncp->hash_prev, cnt++) {
1095 			vp = ncp->vp;
1096 			/*
1097 			 * A name cache entry with a reference count
1098 			 * of one is only referenced by the dnlc.
1099 			 * Also negative cache entries are purged first.
1100 			 */
1101 			if (!vn_has_cached_data(vp) &&
1102 			    ((vp->v_count == 1) || (vp == DNLC_NO_VNODE))) {
1103 				ncs.ncs_pick_heur.value.ui64++;
1104 				goto found;
1105 			}
1106 			/*
1107 			 * Remove from the end of the chain if the
1108 			 * chain is too long
1109 			 */
1110 			if (cnt > dnlc_long_chain) {
1111 				ncp = hp->hash_prev;
1112 				ncs.ncs_pick_last.value.ui64++;
1113 				vp = ncp->vp;
1114 				goto found;
1115 			}
1116 		}
1117 		/* check for race and continue */
1118 		if (hp->hash_next == (ncache_t *)hp) {
1119 			mutex_exit(&hp->hash_lock);
1120 			continue;
1121 		}
1122 
1123 		ncp = hp->hash_prev; /* pick the last one in the hash queue */
1124 		ncs.ncs_pick_last.value.ui64++;
1125 		vp = ncp->vp;
1126 found:
1127 		/*
1128 		 * Remove from hash chain.
1129 		 */
1130 		nc_rmhash(ncp);
1131 		mutex_exit(&hp->hash_lock);
1132 		VN_RELE_DNLC(vp);
1133 		VN_RELE_DNLC(ncp->dp);
1134 		dnlc_free(ncp);
1135 	} while (dnlc_nentries > low_water);
1136 
1137 	dnlc_free_rotor = hp;
1138 	dnlc_reduce_idle = 1;
1139 }
1140 
1141 /*
1142  * Directory caching routines
1143  * ==========================
1144  *
1145  * See dnlc.h for details of the interfaces below.
1146  */
1147 
1148 /*
1149  * Lookup up an entry in a complete or partial directory cache.
1150  */
1151 dcret_t
1152 dnlc_dir_lookup(dcanchor_t *dcap, const char *name, uint64_t *handle)
1153 {
1154 	dircache_t *dcp;
1155 	dcentry_t *dep;
1156 	int hash;
1157 	int ret;
1158 	uchar_t namlen;
1159 
1160 	/*
1161 	 * can test without lock as we are only a cache
1162 	 */
1163 	if (!VALID_DIR_CACHE(dcap->dca_dircache)) {
1164 		ncs.ncs_dir_misses.value.ui64++;
1165 		return (DNOCACHE);
1166 	}
1167 
1168 	if (!dnlc_dir_enable) {
1169 		return (DNOCACHE);
1170 	}
1171 
1172 	mutex_enter(&dcap->dca_lock);
1173 	dcp = (dircache_t *)dcap->dca_dircache;
1174 	if (VALID_DIR_CACHE(dcp)) {
1175 		dcp->dc_actime = ddi_get_lbolt64();
1176 		DNLC_DIR_HASH(name, hash, namlen);
1177 		dep = dcp->dc_namehash[hash & dcp->dc_nhash_mask];
1178 		while (dep != NULL) {
1179 			if ((dep->de_hash == hash) &&
1180 			    (namlen == dep->de_namelen) &&
1181 			    bcmp(dep->de_name, name, namlen) == 0) {
1182 				*handle = dep->de_handle;
1183 				mutex_exit(&dcap->dca_lock);
1184 				ncs.ncs_dir_hits.value.ui64++;
1185 				return (DFOUND);
1186 			}
1187 			dep = dep->de_next;
1188 		}
1189 		if (dcp->dc_complete) {
1190 			ret = DNOENT;
1191 		} else {
1192 			ret = DNOCACHE;
1193 		}
1194 		mutex_exit(&dcap->dca_lock);
1195 		return (ret);
1196 	} else {
1197 		mutex_exit(&dcap->dca_lock);
1198 		ncs.ncs_dir_misses.value.ui64++;
1199 		return (DNOCACHE);
1200 	}
1201 }
1202 
1203 /*
1204  * Start a new directory cache. An estimate of the number of
1205  * entries is provided to as a quick check to ensure the directory
1206  * is cacheable.
1207  */
1208 dcret_t
1209 dnlc_dir_start(dcanchor_t *dcap, uint_t num_entries)
1210 {
1211 	dircache_t *dcp;
1212 
1213 	if (!dnlc_dir_enable ||
1214 	    (num_entries < dnlc_dir_min_size)) {
1215 		return (DNOCACHE);
1216 	}
1217 
1218 	if (num_entries > dnlc_dir_max_size) {
1219 		return (DTOOBIG);
1220 	}
1221 
1222 	mutex_enter(&dc_head.dch_lock);
1223 	mutex_enter(&dcap->dca_lock);
1224 
1225 	if (dcap->dca_dircache == DC_RET_LOW_MEM) {
1226 		dcap->dca_dircache = NULL;
1227 		mutex_exit(&dcap->dca_lock);
1228 		mutex_exit(&dc_head.dch_lock);
1229 		return (DNOMEM);
1230 	}
1231 
1232 	/*
1233 	 * Check if there's currently a cache.
1234 	 * This probably only occurs on a race.
1235 	 */
1236 	if (dcap->dca_dircache != NULL) {
1237 		mutex_exit(&dcap->dca_lock);
1238 		mutex_exit(&dc_head.dch_lock);
1239 		return (DNOCACHE);
1240 	}
1241 
1242 	/*
1243 	 * Allocate the dircache struct, entry and free space hash tables.
1244 	 * These tables are initially just one entry but dynamically resize
1245 	 * when entries and free space are added or removed.
1246 	 */
1247 	if ((dcp = kmem_zalloc(sizeof (dircache_t), KM_NOSLEEP)) == NULL) {
1248 		goto error;
1249 	}
1250 	if ((dcp->dc_namehash = kmem_zalloc(sizeof (dcentry_t *),
1251 	    KM_NOSLEEP)) == NULL) {
1252 		goto error;
1253 	}
1254 	if ((dcp->dc_freehash = kmem_zalloc(sizeof (dcfree_t *),
1255 	    KM_NOSLEEP)) == NULL) {
1256 		goto error;
1257 	}
1258 
1259 	dcp->dc_anchor = dcap; /* set back pointer to anchor */
1260 	dcap->dca_dircache = dcp;
1261 
1262 	/* add into head of global chain */
1263 	dcp->dc_next = dc_head.dch_next;
1264 	dcp->dc_prev = (dircache_t *)&dc_head;
1265 	dcp->dc_next->dc_prev = dcp;
1266 	dc_head.dch_next = dcp;
1267 
1268 	mutex_exit(&dcap->dca_lock);
1269 	mutex_exit(&dc_head.dch_lock);
1270 	ncs.ncs_cur_dirs.value.ui64++;
1271 	ncs.ncs_dirs_cached.value.ui64++;
1272 	return (DOK);
1273 error:
1274 	if (dcp != NULL) {
1275 		if (dcp->dc_namehash) {
1276 			kmem_free(dcp->dc_namehash, sizeof (dcentry_t *));
1277 		}
1278 		kmem_free(dcp, sizeof (dircache_t));
1279 	}
1280 	/*
1281 	 * Must also kmem_free dcp->dc_freehash if more error cases are added
1282 	 */
1283 	mutex_exit(&dcap->dca_lock);
1284 	mutex_exit(&dc_head.dch_lock);
1285 	ncs.ncs_dir_start_nm.value.ui64++;
1286 	return (DNOCACHE);
1287 }
1288 
1289 /*
1290  * Add a directopry entry to a partial or complete directory cache.
1291  */
1292 dcret_t
1293 dnlc_dir_add_entry(dcanchor_t *dcap, const char *name, uint64_t handle)
1294 {
1295 	dircache_t *dcp;
1296 	dcentry_t **hp, *dep;
1297 	int hash;
1298 	uint_t capacity;
1299 	uchar_t namlen;
1300 
1301 	/*
1302 	 * Allocate the dcentry struct, including the variable
1303 	 * size name. Note, the null terminator is not copied.
1304 	 *
1305 	 * We do this outside the lock to avoid possible deadlock if
1306 	 * dnlc_dir_reclaim() is called as a result of memory shortage.
1307 	 */
1308 	DNLC_DIR_HASH(name, hash, namlen);
1309 	dep = kmem_alloc(sizeof (dcentry_t) - 1 + namlen, KM_NOSLEEP);
1310 	if (dep == NULL) {
1311 #ifdef DEBUG
1312 		/*
1313 		 * The kmem allocator generates random failures for
1314 		 * KM_NOSLEEP calls (see KMEM_RANDOM_ALLOCATION_FAILURE)
1315 		 * So try again before we blow away a perfectly good cache.
1316 		 * This is done not to cover an error but purely for
1317 		 * performance running a debug kernel.
1318 		 * This random error only occurs in debug mode.
1319 		 */
1320 		dep = kmem_alloc(sizeof (dcentry_t) - 1 + namlen, KM_NOSLEEP);
1321 		if (dep != NULL)
1322 			goto ok;
1323 #endif
1324 		ncs.ncs_dir_add_nm.value.ui64++;
1325 		/*
1326 		 * Free a directory cache. This may be the one we are
1327 		 * called with.
1328 		 */
1329 		dnlc_dir_reclaim(NULL);
1330 		dep = kmem_alloc(sizeof (dcentry_t) - 1 + namlen, KM_NOSLEEP);
1331 		if (dep == NULL) {
1332 			/*
1333 			 * still no memory, better delete this cache
1334 			 */
1335 			mutex_enter(&dcap->dca_lock);
1336 			dcp = (dircache_t *)dcap->dca_dircache;
1337 			if (VALID_DIR_CACHE(dcp)) {
1338 				dnlc_dir_abort(dcp);
1339 				dcap->dca_dircache = DC_RET_LOW_MEM;
1340 			}
1341 			mutex_exit(&dcap->dca_lock);
1342 			ncs.ncs_dir_addabort.value.ui64++;
1343 			return (DNOCACHE);
1344 		}
1345 		/*
1346 		 * fall through as if the 1st kmem_alloc had worked
1347 		 */
1348 	}
1349 #ifdef DEBUG
1350 ok:
1351 #endif
1352 	mutex_enter(&dcap->dca_lock);
1353 	dcp = (dircache_t *)dcap->dca_dircache;
1354 	if (VALID_DIR_CACHE(dcp)) {
1355 		/*
1356 		 * If the total number of entries goes above the max
1357 		 * then free this cache
1358 		 */
1359 		if ((dcp->dc_num_entries + dcp->dc_num_free) >
1360 		    dnlc_dir_max_size) {
1361 			mutex_exit(&dcap->dca_lock);
1362 			dnlc_dir_purge(dcap);
1363 			kmem_free(dep, sizeof (dcentry_t) - 1 + namlen);
1364 			ncs.ncs_dir_add_max.value.ui64++;
1365 			return (DTOOBIG);
1366 		}
1367 		dcp->dc_num_entries++;
1368 		capacity = (dcp->dc_nhash_mask + 1) << dnlc_dir_hash_size_shift;
1369 		if (dcp->dc_num_entries >=
1370 		    (capacity << dnlc_dir_hash_resize_shift)) {
1371 			dnlc_dir_adjust_nhash(dcp);
1372 		}
1373 		hp = &dcp->dc_namehash[hash & dcp->dc_nhash_mask];
1374 
1375 		/*
1376 		 * Initialise and chain in new entry
1377 		 */
1378 		dep->de_handle = handle;
1379 		dep->de_hash = hash;
1380 		/*
1381 		 * Note de_namelen is a uchar_t to conserve space
1382 		 * and alignment padding. The max length of any
1383 		 * pathname component is defined as MAXNAMELEN
1384 		 * which is 256 (including the terminating null).
1385 		 * So provided this doesn't change, we don't include the null,
1386 		 * we always use bcmp to compare strings, and we don't
1387 		 * start storing full names, then we are ok.
1388 		 * The space savings is worth it.
1389 		 */
1390 		dep->de_namelen = namlen;
1391 		bcopy(name, dep->de_name, namlen);
1392 		dep->de_next = *hp;
1393 		*hp = dep;
1394 		dcp->dc_actime = ddi_get_lbolt64();
1395 		mutex_exit(&dcap->dca_lock);
1396 		ncs.ncs_dir_num_ents.value.ui64++;
1397 		return (DOK);
1398 	} else {
1399 		mutex_exit(&dcap->dca_lock);
1400 		kmem_free(dep, sizeof (dcentry_t) - 1 + namlen);
1401 		return (DNOCACHE);
1402 	}
1403 }
1404 
1405 /*
1406  * Add free space to a partial or complete directory cache.
1407  */
1408 dcret_t
1409 dnlc_dir_add_space(dcanchor_t *dcap, uint_t len, uint64_t handle)
1410 {
1411 	dircache_t *dcp;
1412 	dcfree_t *dfp, **hp;
1413 	uint_t capacity;
1414 
1415 	/*
1416 	 * We kmem_alloc outside the lock to avoid possible deadlock if
1417 	 * dnlc_dir_reclaim() is called as a result of memory shortage.
1418 	 */
1419 	dfp = kmem_cache_alloc(dnlc_dir_space_cache, KM_NOSLEEP);
1420 	if (dfp == NULL) {
1421 #ifdef DEBUG
1422 		/*
1423 		 * The kmem allocator generates random failures for
1424 		 * KM_NOSLEEP calls (see KMEM_RANDOM_ALLOCATION_FAILURE)
1425 		 * So try again before we blow away a perfectly good cache.
1426 		 * This random error only occurs in debug mode
1427 		 */
1428 		dfp = kmem_cache_alloc(dnlc_dir_space_cache, KM_NOSLEEP);
1429 		if (dfp != NULL)
1430 			goto ok;
1431 #endif
1432 		ncs.ncs_dir_add_nm.value.ui64++;
1433 		/*
1434 		 * Free a directory cache. This may be the one we are
1435 		 * called with.
1436 		 */
1437 		dnlc_dir_reclaim(NULL);
1438 		dfp = kmem_cache_alloc(dnlc_dir_space_cache, KM_NOSLEEP);
1439 		if (dfp == NULL) {
1440 			/*
1441 			 * still no memory, better delete this cache
1442 			 */
1443 			mutex_enter(&dcap->dca_lock);
1444 			dcp = (dircache_t *)dcap->dca_dircache;
1445 			if (VALID_DIR_CACHE(dcp)) {
1446 				dnlc_dir_abort(dcp);
1447 				dcap->dca_dircache = DC_RET_LOW_MEM;
1448 			}
1449 			mutex_exit(&dcap->dca_lock);
1450 			ncs.ncs_dir_addabort.value.ui64++;
1451 			return (DNOCACHE);
1452 		}
1453 		/*
1454 		 * fall through as if the 1st kmem_alloc had worked
1455 		 */
1456 	}
1457 
1458 #ifdef DEBUG
1459 ok:
1460 #endif
1461 	mutex_enter(&dcap->dca_lock);
1462 	dcp = (dircache_t *)dcap->dca_dircache;
1463 	if (VALID_DIR_CACHE(dcp)) {
1464 		if ((dcp->dc_num_entries + dcp->dc_num_free) >
1465 		    dnlc_dir_max_size) {
1466 			mutex_exit(&dcap->dca_lock);
1467 			dnlc_dir_purge(dcap);
1468 			kmem_cache_free(dnlc_dir_space_cache, dfp);
1469 			ncs.ncs_dir_add_max.value.ui64++;
1470 			return (DTOOBIG);
1471 		}
1472 		dcp->dc_num_free++;
1473 		capacity = (dcp->dc_fhash_mask + 1) << dnlc_dir_hash_size_shift;
1474 		if (dcp->dc_num_free >=
1475 		    (capacity << dnlc_dir_hash_resize_shift)) {
1476 			dnlc_dir_adjust_fhash(dcp);
1477 		}
1478 		/*
1479 		 * Initialise and chain a new entry
1480 		 */
1481 		dfp->df_handle = handle;
1482 		dfp->df_len = len;
1483 		dcp->dc_actime = ddi_get_lbolt64();
1484 		hp = &(dcp->dc_freehash[DDFHASH(handle, dcp)]);
1485 		dfp->df_next = *hp;
1486 		*hp = dfp;
1487 		mutex_exit(&dcap->dca_lock);
1488 		ncs.ncs_dir_num_ents.value.ui64++;
1489 		return (DOK);
1490 	} else {
1491 		mutex_exit(&dcap->dca_lock);
1492 		kmem_cache_free(dnlc_dir_space_cache, dfp);
1493 		return (DNOCACHE);
1494 	}
1495 }
1496 
1497 /*
1498  * Mark a directory cache as complete.
1499  */
1500 void
1501 dnlc_dir_complete(dcanchor_t *dcap)
1502 {
1503 	dircache_t *dcp;
1504 
1505 	mutex_enter(&dcap->dca_lock);
1506 	dcp = (dircache_t *)dcap->dca_dircache;
1507 	if (VALID_DIR_CACHE(dcp)) {
1508 		dcp->dc_complete = B_TRUE;
1509 	}
1510 	mutex_exit(&dcap->dca_lock);
1511 }
1512 
1513 /*
1514  * Internal routine to delete a partial or full directory cache.
1515  * No additional locking needed.
1516  */
1517 static void
1518 dnlc_dir_abort(dircache_t *dcp)
1519 {
1520 	dcentry_t *dep, *nhp;
1521 	dcfree_t *fep, *fhp;
1522 	uint_t nhtsize = dcp->dc_nhash_mask + 1; /* name hash table size */
1523 	uint_t fhtsize = dcp->dc_fhash_mask + 1; /* free hash table size */
1524 	uint_t i;
1525 
1526 	/*
1527 	 * Free up the cached name entries and hash table
1528 	 */
1529 	for (i = 0; i < nhtsize; i++) { /* for each hash bucket */
1530 		nhp = dcp->dc_namehash[i];
1531 		while (nhp != NULL) { /* for each chained entry */
1532 			dep = nhp->de_next;
1533 			kmem_free(nhp, sizeof (dcentry_t) - 1 +
1534 			    nhp->de_namelen);
1535 			nhp = dep;
1536 		}
1537 	}
1538 	kmem_free(dcp->dc_namehash, sizeof (dcentry_t *) * nhtsize);
1539 
1540 	/*
1541 	 * Free up the free space entries and hash table
1542 	 */
1543 	for (i = 0; i < fhtsize; i++) { /* for each hash bucket */
1544 		fhp = dcp->dc_freehash[i];
1545 		while (fhp != NULL) { /* for each chained entry */
1546 			fep = fhp->df_next;
1547 			kmem_cache_free(dnlc_dir_space_cache, fhp);
1548 			fhp = fep;
1549 		}
1550 	}
1551 	kmem_free(dcp->dc_freehash, sizeof (dcfree_t *) * fhtsize);
1552 
1553 	/*
1554 	 * Finally free the directory cache structure itself
1555 	 */
1556 	ncs.ncs_dir_num_ents.value.ui64 -= (dcp->dc_num_entries +
1557 	    dcp->dc_num_free);
1558 	kmem_free(dcp, sizeof (dircache_t));
1559 	ncs.ncs_cur_dirs.value.ui64--;
1560 }
1561 
1562 /*
1563  * Remove a partial or complete directory cache
1564  */
1565 void
1566 dnlc_dir_purge(dcanchor_t *dcap)
1567 {
1568 	dircache_t *dcp;
1569 
1570 	mutex_enter(&dc_head.dch_lock);
1571 	mutex_enter(&dcap->dca_lock);
1572 	dcp = (dircache_t *)dcap->dca_dircache;
1573 	if (!VALID_DIR_CACHE(dcp)) {
1574 		mutex_exit(&dcap->dca_lock);
1575 		mutex_exit(&dc_head.dch_lock);
1576 		return;
1577 	}
1578 	dcap->dca_dircache = NULL;
1579 	/*
1580 	 * Unchain from global list
1581 	 */
1582 	dcp->dc_prev->dc_next = dcp->dc_next;
1583 	dcp->dc_next->dc_prev = dcp->dc_prev;
1584 	mutex_exit(&dcap->dca_lock);
1585 	mutex_exit(&dc_head.dch_lock);
1586 	dnlc_dir_abort(dcp);
1587 }
1588 
1589 /*
1590  * Remove an entry from a complete or partial directory cache.
1591  * Return the handle if it's non null.
1592  */
1593 dcret_t
1594 dnlc_dir_rem_entry(dcanchor_t *dcap, const char *name, uint64_t *handlep)
1595 {
1596 	dircache_t *dcp;
1597 	dcentry_t **prevpp, *te;
1598 	uint_t capacity;
1599 	int hash;
1600 	int ret;
1601 	uchar_t namlen;
1602 
1603 	if (!dnlc_dir_enable) {
1604 		return (DNOCACHE);
1605 	}
1606 
1607 	mutex_enter(&dcap->dca_lock);
1608 	dcp = (dircache_t *)dcap->dca_dircache;
1609 	if (VALID_DIR_CACHE(dcp)) {
1610 		dcp->dc_actime = ddi_get_lbolt64();
1611 		if (dcp->dc_nhash_mask > 0) { /* ie not minimum */
1612 			capacity = (dcp->dc_nhash_mask + 1) <<
1613 			    dnlc_dir_hash_size_shift;
1614 			if (dcp->dc_num_entries <=
1615 			    (capacity >> dnlc_dir_hash_resize_shift)) {
1616 				dnlc_dir_adjust_nhash(dcp);
1617 			}
1618 		}
1619 		DNLC_DIR_HASH(name, hash, namlen);
1620 		prevpp = &dcp->dc_namehash[hash & dcp->dc_nhash_mask];
1621 		while (*prevpp != NULL) {
1622 			if (((*prevpp)->de_hash == hash) &&
1623 			    (namlen == (*prevpp)->de_namelen) &&
1624 			    bcmp((*prevpp)->de_name, name, namlen) == 0) {
1625 				if (handlep != NULL) {
1626 					*handlep = (*prevpp)->de_handle;
1627 				}
1628 				te = *prevpp;
1629 				*prevpp = (*prevpp)->de_next;
1630 				kmem_free(te, sizeof (dcentry_t) - 1 +
1631 				    te->de_namelen);
1632 
1633 				/*
1634 				 * If the total number of entries
1635 				 * falls below half the minimum number
1636 				 * of entries then free this cache.
1637 				 */
1638 				if (--dcp->dc_num_entries <
1639 				    (dnlc_dir_min_size >> 1)) {
1640 					mutex_exit(&dcap->dca_lock);
1641 					dnlc_dir_purge(dcap);
1642 				} else {
1643 					mutex_exit(&dcap->dca_lock);
1644 				}
1645 				ncs.ncs_dir_num_ents.value.ui64--;
1646 				return (DFOUND);
1647 			}
1648 			prevpp = &((*prevpp)->de_next);
1649 		}
1650 		if (dcp->dc_complete) {
1651 			ncs.ncs_dir_reme_fai.value.ui64++;
1652 			ret = DNOENT;
1653 		} else {
1654 			ret = DNOCACHE;
1655 		}
1656 		mutex_exit(&dcap->dca_lock);
1657 		return (ret);
1658 	} else {
1659 		mutex_exit(&dcap->dca_lock);
1660 		return (DNOCACHE);
1661 	}
1662 }
1663 
1664 
1665 /*
1666  * Remove free space of at least the given length from a complete
1667  * or partial directory cache.
1668  */
1669 dcret_t
1670 dnlc_dir_rem_space_by_len(dcanchor_t *dcap, uint_t len, uint64_t *handlep)
1671 {
1672 	dircache_t *dcp;
1673 	dcfree_t **prevpp, *tfp;
1674 	uint_t fhtsize; /* free hash table size */
1675 	uint_t i;
1676 	uint_t capacity;
1677 	int ret;
1678 
1679 	if (!dnlc_dir_enable) {
1680 		return (DNOCACHE);
1681 	}
1682 
1683 	mutex_enter(&dcap->dca_lock);
1684 	dcp = (dircache_t *)dcap->dca_dircache;
1685 	if (VALID_DIR_CACHE(dcp)) {
1686 		dcp->dc_actime = ddi_get_lbolt64();
1687 		if (dcp->dc_fhash_mask > 0) { /* ie not minimum */
1688 			capacity = (dcp->dc_fhash_mask + 1) <<
1689 			    dnlc_dir_hash_size_shift;
1690 			if (dcp->dc_num_free <=
1691 			    (capacity >> dnlc_dir_hash_resize_shift)) {
1692 				dnlc_dir_adjust_fhash(dcp);
1693 			}
1694 		}
1695 		/*
1696 		 * Search for an entry of the appropriate size
1697 		 * on a first fit basis.
1698 		 */
1699 		fhtsize = dcp->dc_fhash_mask + 1;
1700 		for (i = 0; i < fhtsize; i++) { /* for each hash bucket */
1701 			prevpp = &(dcp->dc_freehash[i]);
1702 			while (*prevpp != NULL) {
1703 				if ((*prevpp)->df_len >= len) {
1704 					*handlep = (*prevpp)->df_handle;
1705 					tfp = *prevpp;
1706 					*prevpp = (*prevpp)->df_next;
1707 					dcp->dc_num_free--;
1708 					mutex_exit(&dcap->dca_lock);
1709 					kmem_cache_free(dnlc_dir_space_cache,
1710 					    tfp);
1711 					ncs.ncs_dir_num_ents.value.ui64--;
1712 					return (DFOUND);
1713 				}
1714 				prevpp = &((*prevpp)->df_next);
1715 			}
1716 		}
1717 		if (dcp->dc_complete) {
1718 			ret = DNOENT;
1719 		} else {
1720 			ret = DNOCACHE;
1721 		}
1722 		mutex_exit(&dcap->dca_lock);
1723 		return (ret);
1724 	} else {
1725 		mutex_exit(&dcap->dca_lock);
1726 		return (DNOCACHE);
1727 	}
1728 }
1729 
1730 /*
1731  * Remove free space with the given handle from a complete or partial
1732  * directory cache.
1733  */
1734 dcret_t
1735 dnlc_dir_rem_space_by_handle(dcanchor_t *dcap, uint64_t handle)
1736 {
1737 	dircache_t *dcp;
1738 	dcfree_t **prevpp, *tfp;
1739 	uint_t capacity;
1740 	int ret;
1741 
1742 	if (!dnlc_dir_enable) {
1743 		return (DNOCACHE);
1744 	}
1745 
1746 	mutex_enter(&dcap->dca_lock);
1747 	dcp = (dircache_t *)dcap->dca_dircache;
1748 	if (VALID_DIR_CACHE(dcp)) {
1749 		dcp->dc_actime = ddi_get_lbolt64();
1750 		if (dcp->dc_fhash_mask > 0) { /* ie not minimum */
1751 			capacity = (dcp->dc_fhash_mask + 1) <<
1752 			    dnlc_dir_hash_size_shift;
1753 			if (dcp->dc_num_free <=
1754 			    (capacity >> dnlc_dir_hash_resize_shift)) {
1755 				dnlc_dir_adjust_fhash(dcp);
1756 			}
1757 		}
1758 
1759 		/*
1760 		 * search for the exact entry
1761 		 */
1762 		prevpp = &(dcp->dc_freehash[DDFHASH(handle, dcp)]);
1763 		while (*prevpp != NULL) {
1764 			if ((*prevpp)->df_handle == handle) {
1765 				tfp = *prevpp;
1766 				*prevpp = (*prevpp)->df_next;
1767 				dcp->dc_num_free--;
1768 				mutex_exit(&dcap->dca_lock);
1769 				kmem_cache_free(dnlc_dir_space_cache, tfp);
1770 				ncs.ncs_dir_num_ents.value.ui64--;
1771 				return (DFOUND);
1772 			}
1773 			prevpp = &((*prevpp)->df_next);
1774 		}
1775 		if (dcp->dc_complete) {
1776 			ncs.ncs_dir_rems_fai.value.ui64++;
1777 			ret = DNOENT;
1778 		} else {
1779 			ret = DNOCACHE;
1780 		}
1781 		mutex_exit(&dcap->dca_lock);
1782 		return (ret);
1783 	} else {
1784 		mutex_exit(&dcap->dca_lock);
1785 		return (DNOCACHE);
1786 	}
1787 }
1788 
1789 /*
1790  * Update the handle of an directory cache entry.
1791  */
1792 dcret_t
1793 dnlc_dir_update(dcanchor_t *dcap, const char *name, uint64_t handle)
1794 {
1795 	dircache_t *dcp;
1796 	dcentry_t *dep;
1797 	int hash;
1798 	int ret;
1799 	uchar_t namlen;
1800 
1801 	if (!dnlc_dir_enable) {
1802 		return (DNOCACHE);
1803 	}
1804 
1805 	mutex_enter(&dcap->dca_lock);
1806 	dcp = (dircache_t *)dcap->dca_dircache;
1807 	if (VALID_DIR_CACHE(dcp)) {
1808 		dcp->dc_actime = ddi_get_lbolt64();
1809 		DNLC_DIR_HASH(name, hash, namlen);
1810 		dep = dcp->dc_namehash[hash & dcp->dc_nhash_mask];
1811 		while (dep != NULL) {
1812 			if ((dep->de_hash == hash) &&
1813 			    (namlen == dep->de_namelen) &&
1814 			    bcmp(dep->de_name, name, namlen) == 0) {
1815 				dep->de_handle = handle;
1816 				mutex_exit(&dcap->dca_lock);
1817 				return (DFOUND);
1818 			}
1819 			dep = dep->de_next;
1820 		}
1821 		if (dcp->dc_complete) {
1822 			ncs.ncs_dir_upd_fail.value.ui64++;
1823 			ret = DNOENT;
1824 		} else {
1825 			ret = DNOCACHE;
1826 		}
1827 		mutex_exit(&dcap->dca_lock);
1828 		return (ret);
1829 	} else {
1830 		mutex_exit(&dcap->dca_lock);
1831 		return (DNOCACHE);
1832 	}
1833 }
1834 
1835 void
1836 dnlc_dir_fini(dcanchor_t *dcap)
1837 {
1838 	dircache_t *dcp;
1839 
1840 	mutex_enter(&dc_head.dch_lock);
1841 	mutex_enter(&dcap->dca_lock);
1842 	dcp = (dircache_t *)dcap->dca_dircache;
1843 	if (VALID_DIR_CACHE(dcp)) {
1844 		/*
1845 		 * Unchain from global list
1846 		 */
1847 		ncs.ncs_dir_finipurg.value.ui64++;
1848 		dcp->dc_prev->dc_next = dcp->dc_next;
1849 		dcp->dc_next->dc_prev = dcp->dc_prev;
1850 	} else {
1851 		dcp = NULL;
1852 	}
1853 	dcap->dca_dircache = NULL;
1854 	mutex_exit(&dcap->dca_lock);
1855 	mutex_exit(&dc_head.dch_lock);
1856 	mutex_destroy(&dcap->dca_lock);
1857 	if (dcp) {
1858 		dnlc_dir_abort(dcp);
1859 	}
1860 }
1861 
1862 /*
1863  * Reclaim callback for dnlc directory caching.
1864  * Invoked by the kernel memory allocator when memory gets tight.
1865  * This is a pretty serious condition and can lead easily lead to system
1866  * hangs if not enough space is returned.
1867  *
1868  * Deciding which directory (or directories) to purge is tricky.
1869  * Purging everything is an overkill, but purging just the oldest used
1870  * was found to lead to hangs. The largest cached directories use the
1871  * most memory, but take the most effort to rebuild, whereas the smaller
1872  * ones have little value and give back little space. So what to do?
1873  *
1874  * The current policy is to continue purging the oldest used directories
1875  * until at least dnlc_dir_min_reclaim directory entries have been purged.
1876  */
1877 /*ARGSUSED*/
1878 static void
1879 dnlc_dir_reclaim(void *unused)
1880 {
1881 	dircache_t *dcp, *oldest;
1882 	uint_t dirent_cnt = 0;
1883 
1884 	mutex_enter(&dc_head.dch_lock);
1885 	while (dirent_cnt < dnlc_dir_min_reclaim) {
1886 		dcp = dc_head.dch_next;
1887 		oldest = NULL;
1888 		while (dcp != (dircache_t *)&dc_head) {
1889 			if (oldest == NULL) {
1890 				oldest = dcp;
1891 			} else {
1892 				if (dcp->dc_actime < oldest->dc_actime) {
1893 					oldest = dcp;
1894 				}
1895 			}
1896 			dcp = dcp->dc_next;
1897 		}
1898 		if (oldest == NULL) {
1899 			/* nothing to delete */
1900 			mutex_exit(&dc_head.dch_lock);
1901 			return;
1902 		}
1903 		/*
1904 		 * remove from directory chain and purge
1905 		 */
1906 		oldest->dc_prev->dc_next = oldest->dc_next;
1907 		oldest->dc_next->dc_prev = oldest->dc_prev;
1908 		mutex_enter(&oldest->dc_anchor->dca_lock);
1909 		/*
1910 		 * If this was the last entry then it must be too large.
1911 		 * Mark it as such by saving a special dircache_t
1912 		 * pointer (DC_RET_LOW_MEM) in the anchor. The error DNOMEM
1913 		 * will be presented to the caller of dnlc_dir_start()
1914 		 */
1915 		if (oldest->dc_next == oldest->dc_prev) {
1916 			oldest->dc_anchor->dca_dircache = DC_RET_LOW_MEM;
1917 			ncs.ncs_dir_rec_last.value.ui64++;
1918 		} else {
1919 			oldest->dc_anchor->dca_dircache = NULL;
1920 			ncs.ncs_dir_recl_any.value.ui64++;
1921 		}
1922 		mutex_exit(&oldest->dc_anchor->dca_lock);
1923 		dirent_cnt += oldest->dc_num_entries;
1924 		dnlc_dir_abort(oldest);
1925 	}
1926 	mutex_exit(&dc_head.dch_lock);
1927 }
1928 
1929 /*
1930  * Dynamically grow or shrink the size of the name hash table
1931  */
1932 static void
1933 dnlc_dir_adjust_nhash(dircache_t *dcp)
1934 {
1935 	dcentry_t **newhash, *dep, **nhp, *tep;
1936 	uint_t newsize;
1937 	uint_t oldsize;
1938 	uint_t newsizemask;
1939 	int i;
1940 
1941 	/*
1942 	 * Allocate new hash table
1943 	 */
1944 	newsize = dcp->dc_num_entries >> dnlc_dir_hash_size_shift;
1945 	newhash = kmem_zalloc(sizeof (dcentry_t *) * newsize, KM_NOSLEEP);
1946 	if (newhash == NULL) {
1947 		/*
1948 		 * System is short on memory just return
1949 		 * Note, the old hash table is still usable.
1950 		 * This return is unlikely to repeatedy occur, because
1951 		 * either some other directory caches will be reclaimed
1952 		 * due to memory shortage, thus freeing memory, or this
1953 		 * directory cahe will be reclaimed.
1954 		 */
1955 		return;
1956 	}
1957 	oldsize = dcp->dc_nhash_mask + 1;
1958 	dcp->dc_nhash_mask = newsizemask = newsize - 1;
1959 
1960 	/*
1961 	 * Move entries from the old table to the new
1962 	 */
1963 	for (i = 0; i < oldsize; i++) { /* for each hash bucket */
1964 		dep = dcp->dc_namehash[i];
1965 		while (dep != NULL) { /* for each chained entry */
1966 			tep = dep;
1967 			dep = dep->de_next;
1968 			nhp = &newhash[tep->de_hash & newsizemask];
1969 			tep->de_next = *nhp;
1970 			*nhp = tep;
1971 		}
1972 	}
1973 
1974 	/*
1975 	 * delete old hash table and set new one in place
1976 	 */
1977 	kmem_free(dcp->dc_namehash, sizeof (dcentry_t *) * oldsize);
1978 	dcp->dc_namehash = newhash;
1979 }
1980 
1981 /*
1982  * Dynamically grow or shrink the size of the free space hash table
1983  */
1984 static void
1985 dnlc_dir_adjust_fhash(dircache_t *dcp)
1986 {
1987 	dcfree_t **newhash, *dfp, **nhp, *tfp;
1988 	uint_t newsize;
1989 	uint_t oldsize;
1990 	int i;
1991 
1992 	/*
1993 	 * Allocate new hash table
1994 	 */
1995 	newsize = dcp->dc_num_free >> dnlc_dir_hash_size_shift;
1996 	newhash = kmem_zalloc(sizeof (dcfree_t *) * newsize, KM_NOSLEEP);
1997 	if (newhash == NULL) {
1998 		/*
1999 		 * System is short on memory just return
2000 		 * Note, the old hash table is still usable.
2001 		 * This return is unlikely to repeatedy occur, because
2002 		 * either some other directory caches will be reclaimed
2003 		 * due to memory shortage, thus freeing memory, or this
2004 		 * directory cahe will be reclaimed.
2005 		 */
2006 		return;
2007 	}
2008 	oldsize = dcp->dc_fhash_mask + 1;
2009 	dcp->dc_fhash_mask = newsize - 1;
2010 
2011 	/*
2012 	 * Move entries from the old table to the new
2013 	 */
2014 	for (i = 0; i < oldsize; i++) { /* for each hash bucket */
2015 		dfp = dcp->dc_freehash[i];
2016 		while (dfp != NULL) { /* for each chained entry */
2017 			tfp = dfp;
2018 			dfp = dfp->df_next;
2019 			nhp = &newhash[DDFHASH(tfp->df_handle, dcp)];
2020 			tfp->df_next = *nhp;
2021 			*nhp = tfp;
2022 		}
2023 	}
2024 
2025 	/*
2026 	 * delete old hash table and set new one in place
2027 	 */
2028 	kmem_free(dcp->dc_freehash, sizeof (dcfree_t *) * oldsize);
2029 	dcp->dc_freehash = newhash;
2030 }
2031