/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved  	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/dnlc.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/vtrace.h>
#include <sys/bitmap.h>
#include <sys/var.h>
#include <sys/sysmacros.h>
#include <sys/kstat.h>
#include <sys/atomic.h>
#include <sys/taskq.h>

/*
 * Directory name lookup cache.
 * Based on code originally done by Robert Elz at Melbourne.
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  Each hash chain is ordered by LRU.
 * The cache is indexed by a hash value obtained from (vp, name),
 * where vp refers to the directory containing the name.
 */

/*
 * We want to be able to identify files that are referenced only by the DNLC.
 * When adding a reference from the DNLC, call VN_HOLD_DNLC instead of VN_HOLD,
 * since multiple DNLC references should only be counted once in v_count. This
 * file contains only two (2) calls to VN_HOLD, renamed VN_HOLD_CALLER in the
 * hope that no one will mistakenly add a VN_HOLD to this file. (Unfortunately
 * it is not possible to #undef VN_HOLD and retain VN_HOLD_CALLER. Ideally a
 * Makefile rule would grep uncommented C tokens to check that VN_HOLD is
 * referenced only once in this file, to define VN_HOLD_CALLER.)
 */
#define	VN_HOLD_CALLER	VN_HOLD
#define	VN_HOLD_DNLC(vp)	{	\
	mutex_enter(&(vp)->v_lock);	\
	if ((vp)->v_count_dnlc == 0)	\
		(vp)->v_count++;	\
	(vp)->v_count_dnlc++;		\
	mutex_exit(&(vp)->v_lock);	\
}
#define	VN_RELE_DNLC(vp)	{	\
	vn_rele_dnlc(vp);		\
}
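
/*
 * Illustrative sketch (an editorial example, not from the original
 * source): starting from v_count == 1 (a filesystem hold) and
 * v_count_dnlc == 0, the counters evolve as follows when the same
 * vnode is cached under two names:
 *
 *	VN_HOLD_DNLC(vp);	v_count == 2, v_count_dnlc == 1
 *	VN_HOLD_DNLC(vp);	v_count == 2, v_count_dnlc == 2
 *	VN_RELE_DNLC(vp);	v_count == 2, v_count_dnlc == 1
 *	VN_RELE_DNLC(vp);	v_count == 1, v_count_dnlc == 0
 *
 * Any number of DNLC references thus costs exactly one v_count hold,
 * which is why "v_count == 1" below identifies vnodes referenced only
 * by the dnlc (assuming vn_rele_dnlc() drops that single hold when the
 * last DNLC reference goes away).
 */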

/*
 * Tunable nc_hashavelen is the desired average hash chain length, from
 * which the size of the nc_hash table is derived at create time.
 */
#define	NC_HASHAVELEN_DEFAULT	4
int nc_hashavelen = NC_HASHAVELEN_DEFAULT;

/*
 * NC_MOVETOFRONT is the move-to-front threshold: if the hash lookup
 * depth exceeds this value, we move the looked-up entry to the front of
 * its hash chain.  The idea is to make sure that the most frequently
 * accessed entries are found most quickly (by keeping them near the
 * front of their hash chains).
 */
#define	NC_MOVETOFRONT	2

/*
 *
 * DNLC_MAX_RELE is used to size an array on the stack when releasing
 * vnodes. This array is used rather than calling VN_RELE() inline because
 * all dnlc locks must be dropped by that time in order to avoid a
 * possible deadlock. This deadlock occurs when the dnlc holds the last
 * reference to the vnode and so the VOP_INACTIVE vector is called which
 * can in turn call back into the dnlc. A global array was used but had
 * many problems:
 *	1) There was no actual upper bound on the array size, as
 *	   entries can be added after starting the purge.
 *	2) The locking scheme causes a hang.
 *	3) Caused serialisation on the global lock.
 *	4) The array was often unnecessarily huge.
 *
 * Note the current value 8 allows up to 4 cache entries (to be purged
 * from each hash chain) before having to cycle around and retry.
 * This ought to be ample given that nc_hashavelen is typically very small.
 */
#define	DNLC_MAX_RELE	8 /* must be even */

/*
 * Hash table of name cache entries for fast lookup, dynamically
 * allocated at startup.
 */
nc_hash_t *nc_hash;

/*
 * Rotors. Used to select entries on a round-robin basis.
 */
static nc_hash_t *dnlc_purge_fs1_rotor;
static nc_hash_t *dnlc_free_rotor;

/*
 * # of dnlc entries (uninitialized)
 *
 * The initial value was chosen as a random string of bits,
 * probably not one normally chosen by a systems administrator,
 * so that it can act as a "not explicitly set" sentinel.
 */
int ncsize = -1;
volatile uint32_t dnlc_nentries = 0;	/* current num of name cache entries */
static int nc_hashsz;			/* size of hash table */
static int nc_hashmask;			/* size of hash table minus 1 */

/*
 * The dnlc_reduce_cache() taskq queue is activated when there are
 * ncsize name cache entries and, if no parameter is provided, it reduces
 * the size down to dnlc_nentries_low_water, which is by default one
 * hundredth less than ncsize (i.e. 99% of it).
 *
 * If a parameter is provided to dnlc_reduce_cache(), then we reduce
 * the size down based on ncsize_onepercent - where ncsize_onepercent
 * is 1% of ncsize; however, we never let dnlc_reduce_cache() reduce
 * the size below 3% of ncsize (ncsize_min_percent).
 */
#define	DNLC_LOW_WATER_DIVISOR_DEFAULT 100
uint_t dnlc_low_water_divisor = DNLC_LOW_WATER_DIVISOR_DEFAULT;
uint_t dnlc_nentries_low_water;
int dnlc_reduce_idle = 1; /* no locking needed */
uint_t ncsize_onepercent;
uint_t ncsize_min_percent;
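
/*
 * A worked example with hypothetical numbers (not from the source):
 * with ncsize = 20000 and the default divisor of 100, dnlc_init()
 * derives
 *
 *	dnlc_nentries_low_water	= 20000 - 20000/100	= 19800
 *	ncsize_onepercent	= 20000/100		= 200
 *	ncsize_min_percent	= 200 * 3		= 600
 *	dnlc_max_nentries	= 20000 * 2		= 40000
 *
 * so the taskq normally trims back to 19800 entries, a caller-supplied
 * reduce_percent trims in steps of 200 entries per percent, never below
 * 600, and new entries are refused outright at 40000.
 */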

/*
 * If dnlc_nentries hits dnlc_max_nentries (twice ncsize)
 * then this means the dnlc_reduce_cache() taskq is failing to
 * keep up. In this case we refuse to add new entries to the dnlc
 * until the taskq catches up.
 */
uint_t dnlc_max_nentries; /* twice ncsize */
uint64_t dnlc_max_nentries_cnt = 0; /* statistic on times we failed */

/*
 * Tunable to define when we should just remove items from
 * the end of the chain.
 */
#define	DNLC_LONG_CHAIN 8
uint_t dnlc_long_chain = DNLC_LONG_CHAIN;

/*
 * ncstats has been deprecated, due to the integer size of the counters
 * which can easily overflow in the dnlc.
 * It is maintained (at some expense) for compatibility.
 * The preferred interface is the kstat accessible nc_stats below.
 */
struct ncstats ncstats;

struct nc_stats ncs = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "negative_cache_hits",	KSTAT_DATA_UINT64 },
	{ "enters",			KSTAT_DATA_UINT64 },
	{ "double_enters",		KSTAT_DATA_UINT64 },
	{ "purge_total_entries",	KSTAT_DATA_UINT64 },
	{ "purge_all",			KSTAT_DATA_UINT64 },
	{ "purge_vp",			KSTAT_DATA_UINT64 },
	{ "purge_vfs",			KSTAT_DATA_UINT64 },
	{ "purge_fs1",			KSTAT_DATA_UINT64 },
	{ "pick_free",			KSTAT_DATA_UINT64 },
	{ "pick_heuristic",		KSTAT_DATA_UINT64 },
	{ "pick_last",			KSTAT_DATA_UINT64 },

	/* directory caching stats */

	{ "dir_hits",			KSTAT_DATA_UINT64 },
	{ "dir_misses",			KSTAT_DATA_UINT64 },
	{ "dir_cached_current",		KSTAT_DATA_UINT64 },
	{ "dir_entries_cached_current",	KSTAT_DATA_UINT64 },
	{ "dir_cached_total",		KSTAT_DATA_UINT64 },
	{ "dir_start_no_memory",	KSTAT_DATA_UINT64 },
	{ "dir_add_no_memory",		KSTAT_DATA_UINT64 },
	{ "dir_add_abort",		KSTAT_DATA_UINT64 },
	{ "dir_add_max",		KSTAT_DATA_UINT64 },
	{ "dir_remove_entry_fail",	KSTAT_DATA_UINT64 },
	{ "dir_remove_space_fail",	KSTAT_DATA_UINT64 },
	{ "dir_update_fail",		KSTAT_DATA_UINT64 },
	{ "dir_fini_purge",		KSTAT_DATA_UINT64 },
	{ "dir_reclaim_last",		KSTAT_DATA_UINT64 },
	{ "dir_reclaim_any",		KSTAT_DATA_UINT64 },
};
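
/*
 * These counters are exported below by dnlc_init() as the named kstat
 * unix:0:dnlcstats, so they can be inspected from userland with, for
 * example, kstat(1M):
 *
 *	$ kstat -m unix -n dnlcstats
 *
 * (A usage sketch only; the exact output format depends on the kstat
 * utility.)
 */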

static int doingcache = 1;

vnode_t negative_cache_vnode;

/*
 * Insert entry at the front of the queue
 */
#define	nc_inshash(ncp, hp) \
{ \
	(ncp)->hash_next = (hp)->hash_next; \
	(ncp)->hash_prev = (ncache_t *)(hp); \
	(hp)->hash_next->hash_prev = (ncp); \
	(hp)->hash_next = (ncp); \
}

/*
 * Remove entry from hash queue
 */
#define	nc_rmhash(ncp) \
{ \
	(ncp)->hash_prev->hash_next = (ncp)->hash_next; \
	(ncp)->hash_next->hash_prev = (ncp)->hash_prev; \
	(ncp)->hash_prev = NULL; \
	(ncp)->hash_next = NULL; \
}

/*
 * Free an entry.
 */
#define	dnlc_free(ncp) \
{ \
	kmem_free((ncp), sizeof (ncache_t) + (ncp)->namlen); \
	atomic_add_32(&dnlc_nentries, -1); \
}


/*
 * Cached directory info.
 * ======================
 */

/*
 * Cached directory free space hash function.
 * Needs the free space handle and the dcp to get the hash table size.
 * Returns the hash index.
 */
#define	DDFHASH(handle, dcp) ((handle >> 2) & (dcp)->dc_fhash_mask)

/*
 * Cached directory name entry hash function.
 * Computes from the name, and returns through the macro arguments,
 * both the hash and the name length.
 */
#define	DNLC_DIR_HASH(name, hash, namelen)			\
	{							\
		char Xc, *Xcp;					\
		hash = *name;					\
		for (Xcp = (name + 1); (Xc = *Xcp) != 0; Xcp++)	\
			hash = (hash << 4) + hash + Xc;		\
		ASSERT((Xcp - (name)) <= ((1 << NBBY) - 1));	\
		namelen = Xcp - (name);				\
	}
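
/*
 * Since (hash << 4) + hash == hash * 17, DNLC_DIR_HASH is effectively
 * the multiplicative string hash h = h*17 + c, seeded with the first
 * character. A small worked example (editorial, not from the source)
 * for the name "ab":
 *
 *	hash = 'a'				=   97
 *	hash = 97*17 + 'b' = 1649 + 98		= 1747
 *	namelen = 2
 *
 * The ASSERT guards the later narrowing of the name length into a
 * uchar_t: names must fit in (1 << NBBY) - 1, i.e. 255, bytes.
 */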

/* special dircache_t pointer to indicate error should be returned */
/*
 * The anchor directory cache pointer can contain 3 types of values,
 * 1) NULL: No directory cache
 * 2) DC_RET_LOW_MEM ((dircache_t *)1): There was a directory cache that
 *    was found to be too big or a memory shortage occurred. This value
 *    remains in the pointer until a dnlc_dir_start(), which returns a
 *    DNOMEM error. This is kludgy but efficient and only visible in this
 *    source file.
 * 3) A valid cache pointer.
 */
#define	DC_RET_LOW_MEM (dircache_t *)1
#define	VALID_DIR_CACHE(dcp) ((dircache_t *)(dcp) > DC_RET_LOW_MEM)

/* Tunables */
uint_t dnlc_dir_enable = 1; /* disable caching directories by setting to 0 */
uint_t dnlc_dir_min_size = 40; /* min no of directory entries before caching */
uint_t dnlc_dir_max_size = UINT_MAX; /* ditto maximum */
uint_t dnlc_dir_hash_size_shift = 3; /* 8 entries per hash bucket */
uint_t dnlc_dir_min_reclaim =  350000; /* approx 1MB of dcentrys */
/*
 * dnlc_dir_hash_resize_shift determines when the hash tables
 * get re-adjusted due to growth or shrinkage. It is currently 2,
 * indicating that a table can hold at most 4 times, or at least one
 * quarter of, its desired number of entries before it is readjusted.
 * Note that with dnlc_dir_hash_size_shift above set at 3 this means
 * readjustment occurs if the average number of entries per bucket
 * goes above 32 or below 2.
 */
uint_t dnlc_dir_hash_resize_shift = 2; /* readjust rate */

static kmem_cache_t *dnlc_dir_space_cache; /* free space entry cache */
static dchead_t dc_head; /* anchor of cached directories */

/* Prototypes */
static ncache_t *dnlc_get(uchar_t namlen);
static ncache_t *dnlc_search(vnode_t *dp, char *name, uchar_t namlen, int hash);
static void dnlc_dir_reclaim(void *unused);
static void dnlc_dir_abort(dircache_t *dcp);
static void dnlc_dir_adjust_fhash(dircache_t *dcp);
static void dnlc_dir_adjust_nhash(dircache_t *dcp);
static void do_dnlc_reduce_cache(void *);


/*
 * Initialize the directory cache.
 */
void
dnlc_init()
{
	nc_hash_t *hp;
	kstat_t *ksp;
	int i;

	/*
	 * Set up the size of the dnlc (ncsize) and its low water mark.
	 */
	if (ncsize == -1) {
		/* calculate a reasonable size for the low water */
		dnlc_nentries_low_water = 4 * (v.v_proc + maxusers) + 320;
		ncsize = dnlc_nentries_low_water +
		    (dnlc_nentries_low_water / dnlc_low_water_divisor);
	} else {
		/* don't change the user specified ncsize */
		dnlc_nentries_low_water =
		    ncsize - (ncsize / dnlc_low_water_divisor);
	}
	if (ncsize <= 0) {
		doingcache = 0;
		dnlc_dir_enable = 0; /* also disable directory caching */
		ncsize = 0;
		cmn_err(CE_NOTE, "name cache (dnlc) disabled");
		return;
	}
	dnlc_max_nentries = ncsize * 2;
	ncsize_onepercent = ncsize / 100;
	ncsize_min_percent = ncsize_onepercent * 3;

	/*
	 * Initialise the hash table.
	 * Compute hash size rounding to the next power of two.
	 */
	nc_hashsz = ncsize / nc_hashavelen;
	nc_hashsz = 1 << highbit(nc_hashsz);
	nc_hashmask = nc_hashsz - 1;
	nc_hash = kmem_zalloc(nc_hashsz * sizeof (*nc_hash), KM_SLEEP);
	for (i = 0; i < nc_hashsz; i++) {
		hp = (nc_hash_t *)&nc_hash[i];
		mutex_init(&hp->hash_lock, NULL, MUTEX_DEFAULT, NULL);
		hp->hash_next = (ncache_t *)hp;
		hp->hash_prev = (ncache_t *)hp;
	}

	/*
	 * Initialize rotors
	 */
	dnlc_free_rotor = dnlc_purge_fs1_rotor = &nc_hash[0];

	/*
	 * Set up the directory caching to use kmem_cache_alloc
	 * for its free space entries so that we can get a callback
	 * when the system is short on memory, to allow us to free
	 * up some memory. We don't use the constructor/destructor
	 * functions.
	 */
	dnlc_dir_space_cache = kmem_cache_create("dnlc_space_cache",
	    sizeof (dcfree_t), 0, NULL, NULL, dnlc_dir_reclaim, NULL,
	    NULL, 0);

	/*
	 * Initialise the head of the cached directory structures
	 */
	mutex_init(&dc_head.dch_lock, NULL, MUTEX_DEFAULT, NULL);
	dc_head.dch_next = (dircache_t *)&dc_head;
	dc_head.dch_prev = (dircache_t *)&dc_head;

	/*
	 * Initialise the reference count of the negative cache vnode to 1
	 * so that it never goes away (VOP_INACTIVE isn't called on it).
	 */
	negative_cache_vnode.v_count = 1;
	negative_cache_vnode.v_count_dnlc = 0;

	/*
	 * Initialise kstats - both the old compatibility raw kind and
	 * the more extensive named stats.
	 */
	ksp = kstat_create("unix", 0, "ncstats", "misc", KSTAT_TYPE_RAW,
	    sizeof (struct ncstats), KSTAT_FLAG_VIRTUAL);
	if (ksp) {
		ksp->ks_data = (void *) &ncstats;
		kstat_install(ksp);
	}
	ksp = kstat_create("unix", 0, "dnlcstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (ncs) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (ksp) {
		ksp->ks_data = (void *) &ncs;
		kstat_install(ksp);
	}
}

/*
 * Add a name to the directory cache.
 */
void
dnlc_enter(vnode_t *dp, char *name, vnode_t *vp)
{
	ncache_t *ncp;
	nc_hash_t *hp;
	uchar_t namlen;
	int hash;

	TRACE_0(TR_FAC_NFS, TR_DNLC_ENTER_START, "dnlc_enter_start:");

	if (!doingcache) {
		TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
		    "dnlc_enter_end:(%S) %d", "not caching", 0);
		return;
	}

	/*
	 * Get a new dnlc entry. Assume the entry won't be in the cache
	 * and initialize it now
	 */
	DNLCHASH(name, dp, hash, namlen);
	if ((ncp = dnlc_get(namlen)) == NULL)
		return;
	ncp->dp = dp;
	VN_HOLD_DNLC(dp);
	ncp->vp = vp;
	VN_HOLD_DNLC(vp);
	bcopy(name, ncp->name, namlen + 1); /* name and null */
	ncp->hash = hash;
	hp = &nc_hash[hash & nc_hashmask];

	mutex_enter(&hp->hash_lock);
	if (dnlc_search(dp, name, namlen, hash) != NULL) {
		mutex_exit(&hp->hash_lock);
		ncstats.dbl_enters++;
		ncs.ncs_dbl_enters.value.ui64++;
		VN_RELE_DNLC(dp);
		VN_RELE_DNLC(vp);
		dnlc_free(ncp);		/* crfree done here */
		TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
		    "dnlc_enter_end:(%S) %d", "dbl enter", ncstats.dbl_enters);
		return;
	}
	/*
	 * Insert back into the hash chain.
	 */
	nc_inshash(ncp, hp);
	mutex_exit(&hp->hash_lock);
	ncstats.enters++;
	ncs.ncs_enters.value.ui64++;
	TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
	    "dnlc_enter_end:(%S) %d", "done", ncstats.enters);
}
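
/*
 * Caller-side note (an editorial sketch, not from this file):
 * dnlc_enter() suits the case where the name is known not to be
 * cached, e.g. straight after creating a file; when an existing entry
 * might need its vnode replaced, dnlc_update() below overwrites in
 * place instead of counting a double enter:
 *
 *	dnlc_enter(dvp, nm, vp);	name assumed not yet cached
 *	dnlc_update(dvp, nm, vp);	name may be cached; fix it up
 */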

/*
 * Add a name to the directory cache.
 *
 * This function is basically identical to
 * dnlc_enter().  The difference is that when the
 * desired dnlc entry is found, the vnode in the
 * ncache is compared with the vnode passed in.
 *
 * If they are not equal then the ncache is
 * updated with the passed in vnode.  Otherwise
 * it just frees up the newly allocated dnlc entry.
 */
void
dnlc_update(vnode_t *dp, char *name, vnode_t *vp)
{
	ncache_t *ncp;
	ncache_t *tcp;
	vnode_t *tvp;
	nc_hash_t *hp;
	int hash;
	uchar_t namlen;

	TRACE_0(TR_FAC_NFS, TR_DNLC_ENTER_START, "dnlc_update_start:");

	if (!doingcache) {
		TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
		    "dnlc_update_end:(%S) %d", "not caching", 0);
		return;
	}

	/*
	 * Get a new dnlc entry and initialize it now.
	 * If we fail to get a new entry, call dnlc_remove() to purge
	 * any existing dnlc entry including negative cache (DNLC_NO_VNODE)
	 * entry.
	 * Failure to clear an existing entry could result in false dnlc
	 * lookup (negative/stale entry).
	 */
	DNLCHASH(name, dp, hash, namlen);
	if ((ncp = dnlc_get(namlen)) == NULL) {
		dnlc_remove(dp, name);
		return;
	}
	ncp->dp = dp;
	VN_HOLD_DNLC(dp);
	ncp->vp = vp;
	VN_HOLD_DNLC(vp);
	bcopy(name, ncp->name, namlen + 1); /* name and null */
	ncp->hash = hash;
	hp = &nc_hash[hash & nc_hashmask];

	mutex_enter(&hp->hash_lock);
	if ((tcp = dnlc_search(dp, name, namlen, hash)) != NULL) {
		if (tcp->vp != vp) {
			tvp = tcp->vp;
			tcp->vp = vp;
			mutex_exit(&hp->hash_lock);
			VN_RELE_DNLC(tvp);
			ncstats.enters++;
			ncs.ncs_enters.value.ui64++;
			TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
			    "dnlc_update_end:(%S) %d", "done", ncstats.enters);
		} else {
			mutex_exit(&hp->hash_lock);
			VN_RELE_DNLC(vp);
			ncstats.dbl_enters++;
			ncs.ncs_dbl_enters.value.ui64++;
			TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
			    "dnlc_update_end:(%S) %d",
			    "dbl enter", ncstats.dbl_enters);
		}
		VN_RELE_DNLC(dp);
		dnlc_free(ncp);		/* crfree done here */
		return;
	}
	/*
	 * insert the new entry, since it is not in dnlc yet
	 */
	nc_inshash(ncp, hp);
	mutex_exit(&hp->hash_lock);
	ncstats.enters++;
	ncs.ncs_enters.value.ui64++;
	TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
	    "dnlc_update_end:(%S) %d", "done", ncstats.enters);
}

/*
 * Look up a name in the directory name cache.
 *
 * Return a doubly-held vnode if found: one hold so that it may
 * remain in the cache for other users, the other hold so that
 * the cache entry is not recycled and the identity of the vnode
 * lost before the caller can use the vnode.
 */
vnode_t *
dnlc_lookup(vnode_t *dp, char *name)
{
	ncache_t *ncp;
	nc_hash_t *hp;
	vnode_t *vp;
	int hash, depth;
	uchar_t namlen;

	TRACE_2(TR_FAC_NFS, TR_DNLC_LOOKUP_START,
	    "dnlc_lookup_start:dp %x name %s", dp, name);

	if (!doingcache) {
		TRACE_4(TR_FAC_NFS, TR_DNLC_LOOKUP_END,
		    "dnlc_lookup_end:%S %d vp %x name %s",
		    "not_caching", 0, NULL, name);
		return (NULL);
	}

	DNLCHASH(name, dp, hash, namlen);
	depth = 1;
	hp = &nc_hash[hash & nc_hashmask];
	mutex_enter(&hp->hash_lock);

	for (ncp = hp->hash_next; ncp != (ncache_t *)hp;
	    ncp = ncp->hash_next) {
		if (ncp->hash == hash &&	/* fast signature check */
		    ncp->dp == dp &&
		    ncp->namlen == namlen &&
		    bcmp(ncp->name, name, namlen) == 0) {
			/*
			 * Move this entry to the head of its hash chain
			 * if it's not already close.
			 */
			if (depth > NC_MOVETOFRONT) {
				ncache_t *next = ncp->hash_next;
				ncache_t *prev = ncp->hash_prev;

				prev->hash_next = next;
				next->hash_prev = prev;
				ncp->hash_next = next = hp->hash_next;
				ncp->hash_prev = (ncache_t *)hp;
				next->hash_prev = ncp;
				hp->hash_next = ncp;

				ncstats.move_to_front++;
			}

			/*
			 * Put a hold on the vnode now so its identity
			 * can't change before the caller has a chance to
			 * put a hold on it.
			 */
			vp = ncp->vp;
			VN_HOLD_CALLER(vp); /* VN_HOLD 1 of 2 in this file */
			mutex_exit(&hp->hash_lock);
			ncstats.hits++;
			ncs.ncs_hits.value.ui64++;
			if (vp == DNLC_NO_VNODE) {
				ncs.ncs_neg_hits.value.ui64++;
			}
			TRACE_4(TR_FAC_NFS, TR_DNLC_LOOKUP_END,
			    "dnlc_lookup_end:%S %d vp %x name %s", "hit",
			    ncstats.hits, vp, name);
			return (vp);
		}
		depth++;
	}

	mutex_exit(&hp->hash_lock);
	ncstats.misses++;
	ncs.ncs_misses.value.ui64++;
	TRACE_4(TR_FAC_NFS, TR_DNLC_LOOKUP_END,
	    "dnlc_lookup_end:%S %d vp %x name %s", "miss", ncstats.misses,
	    NULL, name);
	return (NULL);
}
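
/*
 * A hypothetical VOP_LOOKUP-style caller (an editorial sketch, not
 * from this file), showing the extra hold and the negative-cache
 * convention; real_directory_scan() is a made-up name:
 *
 *	if ((vp = dnlc_lookup(dvp, nm)) != NULL) {
 *		if (vp == DNLC_NO_VNODE) {
 *			VN_RELE(vp);
 *			return (ENOENT);
 *		}
 *		*vpp = vp;
 *		return (0);
 *	}
 *	if ((error = real_directory_scan(dvp, nm, vpp)) == 0)
 *		dnlc_enter(dvp, nm, *vpp);
 *	else if (error == ENOENT)
 *		dnlc_enter(dvp, nm, DNLC_NO_VNODE);
 *	return (error);
 */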

/*
 * Remove an entry in the directory name cache.
 */
void
dnlc_remove(vnode_t *dp, char *name)
{
	ncache_t *ncp;
	nc_hash_t *hp;
	uchar_t namlen;
	int hash;

	if (!doingcache)
		return;
	DNLCHASH(name, dp, hash, namlen);
	hp = &nc_hash[hash & nc_hashmask];

	mutex_enter(&hp->hash_lock);
	if (ncp = dnlc_search(dp, name, namlen, hash)) {
		/*
		 * Free up the entry
		 */
		nc_rmhash(ncp);
		mutex_exit(&hp->hash_lock);
		VN_RELE_DNLC(ncp->vp);
		VN_RELE_DNLC(ncp->dp);
		dnlc_free(ncp);
		return;
	}
	mutex_exit(&hp->hash_lock);
}

/*
 * Purge the entire cache.
 */
void
dnlc_purge()
{
	nc_hash_t *nch;
	ncache_t *ncp;
	int index;
	int i;
	vnode_t *nc_rele[DNLC_MAX_RELE];

	if (!doingcache)
		return;

	ncstats.purges++;
	ncs.ncs_purge_all.value.ui64++;

	for (nch = nc_hash; nch < &nc_hash[nc_hashsz]; nch++) {
		index = 0;
		mutex_enter(&nch->hash_lock);
		ncp = nch->hash_next;
		while (ncp != (ncache_t *)nch) {
			ncache_t *np;

			np = ncp->hash_next;
			nc_rele[index++] = ncp->vp;
			nc_rele[index++] = ncp->dp;

			nc_rmhash(ncp);
			dnlc_free(ncp);
			ncp = np;
			ncs.ncs_purge_total.value.ui64++;
			if (index == DNLC_MAX_RELE)
				break;
		}
		mutex_exit(&nch->hash_lock);

		/* Release holds on all the vnodes now that we have no locks */
		for (i = 0; i < index; i++) {
			VN_RELE_DNLC(nc_rele[i]);
		}
		if (ncp != (ncache_t *)nch) {
			nch--; /* Do current hash chain again */
		}
	}
}

/*
 * Purge any cache entries referencing a vnode. Exit as soon as the dnlc
 * reference count goes to zero (the caller still holds a reference).
 */
void
dnlc_purge_vp(vnode_t *vp)
{
	nc_hash_t *nch;
	ncache_t *ncp;
	int index;
	vnode_t *nc_rele[DNLC_MAX_RELE];

	ASSERT(vp->v_count > 0);
	if (vp->v_count_dnlc == 0) {
		return;
	}

	if (!doingcache)
		return;

	ncstats.purges++;
	ncs.ncs_purge_vp.value.ui64++;

	for (nch = nc_hash; nch < &nc_hash[nc_hashsz]; nch++) {
		index = 0;
		mutex_enter(&nch->hash_lock);
		ncp = nch->hash_next;
		while (ncp != (ncache_t *)nch) {
			ncache_t *np;

			np = ncp->hash_next;
			if (ncp->dp == vp || ncp->vp == vp) {
				nc_rele[index++] = ncp->vp;
				nc_rele[index++] = ncp->dp;
				nc_rmhash(ncp);
				dnlc_free(ncp);
				ncs.ncs_purge_total.value.ui64++;
				if (index == DNLC_MAX_RELE) {
					ncp = np;
					break;
				}
			}
			ncp = np;
		}
		mutex_exit(&nch->hash_lock);

		/* Release holds on all the vnodes now that we have no locks */
		while (index) {
			VN_RELE_DNLC(nc_rele[--index]);
		}

		if (vp->v_count_dnlc == 0) {
			return;
		}

		if (ncp != (ncache_t *)nch) {
			nch--; /* Do current hash chain again */
		}
	}
}

/*
 * Purge cache entries referencing a vfsp.  Caller supplies a count
 * of entries to purge; up to that many will be freed.  A count of
 * zero indicates that all such entries should be purged.  Returns
 * the number of entries that were purged.
 */
int
dnlc_purge_vfsp(vfs_t *vfsp, int count)
{
	nc_hash_t *nch;
	ncache_t *ncp;
	int n = 0;
	int index;
	int i;
	vnode_t *nc_rele[DNLC_MAX_RELE];

	if (!doingcache)
		return (0);

	ncstats.purges++;
	ncs.ncs_purge_vfs.value.ui64++;

	for (nch = nc_hash; nch < &nc_hash[nc_hashsz]; nch++) {
		index = 0;
		mutex_enter(&nch->hash_lock);
		ncp = nch->hash_next;
		while (ncp != (ncache_t *)nch) {
			ncache_t *np;

			np = ncp->hash_next;
			ASSERT(ncp->dp != NULL);
			ASSERT(ncp->vp != NULL);
			if ((ncp->dp->v_vfsp == vfsp) ||
			    (ncp->vp->v_vfsp == vfsp)) {
				n++;
				nc_rele[index++] = ncp->vp;
				nc_rele[index++] = ncp->dp;
				nc_rmhash(ncp);
				dnlc_free(ncp);
				ncs.ncs_purge_total.value.ui64++;
				if (index == DNLC_MAX_RELE) {
					ncp = np;
					break;
				}
				if (count != 0 && n >= count) {
					break;
				}
			}
			ncp = np;
		}
		mutex_exit(&nch->hash_lock);
		/* Release holds on all the vnodes now that we have no locks */
		for (i = 0; i < index; i++) {
			VN_RELE_DNLC(nc_rele[i]);
		}
		if (count != 0 && n >= count) {
			return (n);
		}
		if (ncp != (ncache_t *)nch) {
			nch--; /* Do current hash chain again */
		}
	}
	return (n);
}

/*
 * Purge 1 entry from the dnlc that is part of the filesystem(s)
 * represented by 'vop'. The purpose of this routine is to allow
 * users of the dnlc to free a vnode that is being held by the dnlc.
 *
 * If we find a vnode that we release which will result in
 * freeing the underlying vnode (count was 1), return 1; return 0
 * if no appropriate vnodes are found.
 *
 * Note, vop is not the 'right' identifier for a filesystem.
 */
int
dnlc_fs_purge1(vnodeops_t *vop)
{
	nc_hash_t *end;
	nc_hash_t *hp;
	ncache_t *ncp;
	vnode_t *vp;

	if (!doingcache)
		return (0);

	ncs.ncs_purge_fs1.value.ui64++;

	/*
	 * Scan the dnlc entries looking for a likely candidate.
	 */
	hp = end = dnlc_purge_fs1_rotor;

	do {
		if (++hp == &nc_hash[nc_hashsz])
			hp = nc_hash;
		dnlc_purge_fs1_rotor = hp;
		if (hp->hash_next == (ncache_t *)hp)
			continue;
		mutex_enter(&hp->hash_lock);
		for (ncp = hp->hash_prev;
		    ncp != (ncache_t *)hp;
		    ncp = ncp->hash_prev) {
			vp = ncp->vp;
			if (!vn_has_cached_data(vp) && (vp->v_count == 1) &&
			    vn_matchops(vp, vop))
				break;
		}
		if (ncp != (ncache_t *)hp) {
			nc_rmhash(ncp);
			mutex_exit(&hp->hash_lock);
			VN_RELE_DNLC(ncp->dp);
			VN_RELE_DNLC(vp);
			dnlc_free(ncp);
			ncs.ncs_purge_total.value.ui64++;
			return (1);
		}
		mutex_exit(&hp->hash_lock);
	} while (hp != end);
	return (0);
}

/*
 * Perform a reverse lookup in the DNLC.  This will find the first occurrence of
 * the vnode.  If successful, it will return the vnode of the parent, and the
 * name of the entry in the given buffer.  If it cannot be found, or the buffer
 * is too small, then it will return NULL.  Note that this is a highly
 * inefficient function, since the DNLC is constructed solely for forward
 * lookups.
 */
vnode_t *
dnlc_reverse_lookup(vnode_t *vp, char *buf, size_t buflen)
{
	nc_hash_t *nch;
	ncache_t *ncp;
	vnode_t *pvp;

	if (!doingcache)
		return (NULL);

	for (nch = nc_hash; nch < &nc_hash[nc_hashsz]; nch++) {
		mutex_enter(&nch->hash_lock);
		ncp = nch->hash_next;
		while (ncp != (ncache_t *)nch) {
			/*
			 * We ignore '..' entries since they can create
			 * confusion and infinite loops.
			 */
			if (ncp->vp == vp && !(ncp->namlen == 2 &&
			    0 == bcmp(ncp->name, "..", 2)) &&
			    ncp->namlen < buflen) {
				bcopy(ncp->name, buf, ncp->namlen);
				buf[ncp->namlen] = '\0';
				pvp = ncp->dp;
				/* VN_HOLD 2 of 2 in this file */
				VN_HOLD_CALLER(pvp);
				mutex_exit(&nch->hash_lock);
				return (pvp);
			}
			ncp = ncp->hash_next;
		}
		mutex_exit(&nch->hash_lock);
	}

	return (NULL);
}
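
/*
 * A hypothetical usage sketch (editorial, not from this file): a
 * best-effort pathname service could recover one component at a time,
 * with prepend_component() standing in for the caller's own helper:
 *
 *	char comp[MAXNAMELEN];
 *	vnode_t *pvp;
 *
 *	if ((pvp = dnlc_reverse_lookup(vp, comp, sizeof (comp))) != NULL) {
 *		prepend_component(path, comp);
 *		VN_RELE(pvp);		release the hold we were given
 *	}
 *
 * Each successful call returns a held parent, so the caller must
 * VN_RELE() it when finished.
 */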
/*
 * Utility routine to search for a cache entry. Return the
 * ncache entry if found, NULL otherwise.
 */
static ncache_t *
dnlc_search(vnode_t *dp, char *name, uchar_t namlen, int hash)
{
	nc_hash_t *hp;
	ncache_t *ncp;

	hp = &nc_hash[hash & nc_hashmask];

	for (ncp = hp->hash_next; ncp != (ncache_t *)hp; ncp = ncp->hash_next) {
		if (ncp->hash == hash &&
		    ncp->dp == dp &&
		    ncp->namlen == namlen &&
		    bcmp(ncp->name, name, namlen) == 0)
			return (ncp);
	}
	return (NULL);
}

#if ((1 << NBBY) - 1) < (MAXNAMELEN - 1)
#error ncache_t name length representation is too small
#endif

void
dnlc_reduce_cache(void *reduce_percent)
{
	if (dnlc_reduce_idle && (dnlc_nentries >= ncsize || reduce_percent)) {
		dnlc_reduce_idle = 0;
		if ((taskq_dispatch(system_taskq, do_dnlc_reduce_cache,
		    reduce_percent, TQ_NOSLEEP)) == NULL)
			dnlc_reduce_idle = 1;
	}
}

/*
 * Get a new name cache entry.
 * If the dnlc_reduce_cache() taskq isn't keeping up with demand, or memory
 * is short, then just return NULL. If we're over ncsize then kick off a
 * thread to free some in-use entries down to dnlc_nentries_low_water.
 * Caller must initialise all fields except namlen.
 * Component names are defined to be less than MAXNAMELEN,
 * which includes a null.
 */
static ncache_t *
dnlc_get(uchar_t namlen)
{
	ncache_t *ncp;

	if (dnlc_nentries > dnlc_max_nentries) {
		dnlc_max_nentries_cnt++; /* keep a statistic */
		return (NULL);
	}
	ncp = kmem_alloc(sizeof (ncache_t) + namlen, KM_NOSLEEP);
	if (ncp == NULL) {
		return (NULL);
	}
	ncp->namlen = namlen;
	atomic_add_32(&dnlc_nentries, 1);
	dnlc_reduce_cache(NULL);
	return (ncp);
}

/*
 * Taskq routine to free up name cache entries to reduce the
 * cache size to the low water mark if "reduce_percent" is not provided.
 * If "reduce_percent" is provided, reduce cache size by
 * (ncsize_onepercent * reduce_percent).
 */
/*ARGSUSED*/
static void
do_dnlc_reduce_cache(void *reduce_percent)
{
	nc_hash_t *hp = dnlc_free_rotor, *start_hp = hp;
	vnode_t *vp;
	ncache_t *ncp;
	int cnt;
	uint_t low_water = dnlc_nentries_low_water;

	if (reduce_percent) {
		uint_t reduce_cnt;

		/*
		 * Never try to reduce the current number
		 * of cache entries below 3% of ncsize.
		 */
		if (dnlc_nentries <= ncsize_min_percent) {
			dnlc_reduce_idle = 1;
			return;
		}
		reduce_cnt = ncsize_onepercent *
		    (uint_t)(uintptr_t)reduce_percent;

		if (reduce_cnt > dnlc_nentries ||
		    dnlc_nentries - reduce_cnt < ncsize_min_percent)
			low_water = ncsize_min_percent;
		else
			low_water = dnlc_nentries - reduce_cnt;
	}

	do {
		/*
		 * Find the first non empty hash queue without locking.
		 * Only look at each hash queue once to avoid an infinite loop.
		 */
		do {
			if (++hp == &nc_hash[nc_hashsz])
				hp = nc_hash;
		} while (hp->hash_next == (ncache_t *)hp && hp != start_hp);

		/* return if all hash queues are empty. */
		if (hp->hash_next == (ncache_t *)hp) {
			dnlc_reduce_idle = 1;
			return;
		}

		mutex_enter(&hp->hash_lock);
		for (cnt = 0, ncp = hp->hash_prev; ncp != (ncache_t *)hp;
		    ncp = ncp->hash_prev, cnt++) {
			vp = ncp->vp;
			/*
			 * A name cache entry with a reference count
			 * of one is only referenced by the dnlc.
			 * Also negative cache entries are purged first.
			 */
			if (!vn_has_cached_data(vp) &&
			    ((vp->v_count == 1) || (vp == DNLC_NO_VNODE))) {
				ncs.ncs_pick_heur.value.ui64++;
				goto found;
			}
			/*
			 * Remove from the end of the chain if the
			 * chain is too long
			 */
			if (cnt > dnlc_long_chain) {
				ncp = hp->hash_prev;
				ncs.ncs_pick_last.value.ui64++;
				vp = ncp->vp;
				goto found;
			}
		}
		/* check for race and continue */
		if (hp->hash_next == (ncache_t *)hp) {
			mutex_exit(&hp->hash_lock);
			continue;
		}

		ncp = hp->hash_prev; /* pick the last one in the hash queue */
		ncs.ncs_pick_last.value.ui64++;
		vp = ncp->vp;
found:
		/*
		 * Remove from hash chain.
		 */
		nc_rmhash(ncp);
		mutex_exit(&hp->hash_lock);
		VN_RELE_DNLC(vp);
		VN_RELE_DNLC(ncp->dp);
		dnlc_free(ncp);
	} while (dnlc_nentries > low_water);

	dnlc_free_rotor = hp;
	dnlc_reduce_idle = 1;
}

/*
 * Directory caching routines
 * ==========================
 *
 * See dnlc.h for details of the interfaces below.
 */

/*
 * Look up an entry in a complete or partial directory cache.
 */
dcret_t
dnlc_dir_lookup(dcanchor_t *dcap, char *name, uint64_t *handle)
{
	dircache_t *dcp;
	dcentry_t *dep;
	int hash;
	int ret;
	uchar_t namlen;

	/*
	 * can test without lock as we are only a cache
	 */
	if (!VALID_DIR_CACHE(dcap->dca_dircache)) {
		ncs.ncs_dir_misses.value.ui64++;
		return (DNOCACHE);
	}

	if (!dnlc_dir_enable) {
		return (DNOCACHE);
	}

	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		dcp->dc_actime = ddi_get_lbolt64();
		DNLC_DIR_HASH(name, hash, namlen);
		dep = dcp->dc_namehash[hash & dcp->dc_nhash_mask];
		while (dep != NULL) {
			if ((dep->de_hash == hash) &&
			    (namlen == dep->de_namelen) &&
			    bcmp(dep->de_name, name, namlen) == 0) {
				*handle = dep->de_handle;
				mutex_exit(&dcap->dca_lock);
				ncs.ncs_dir_hits.value.ui64++;
				return (DFOUND);
			}
			dep = dep->de_next;
		}
		if (dcp->dc_complete) {
			ret = DNOENT;
		} else {
			ret = DNOCACHE;
		}
		mutex_exit(&dcap->dca_lock);
		return (ret);
	} else {
		mutex_exit(&dcap->dca_lock);
		ncs.ncs_dir_misses.value.ui64++;
		return (DNOCACHE);
	}
}

/*
 * Start a new directory cache. An estimate of the number of
 * entries is provided as a quick check to ensure the directory
 * is cacheable.
 */
dcret_t
dnlc_dir_start(dcanchor_t *dcap, uint_t num_entries)
{
	dircache_t *dcp;

	if (!dnlc_dir_enable ||
	    (num_entries < dnlc_dir_min_size)) {
		return (DNOCACHE);
	}

	if (num_entries > dnlc_dir_max_size) {
		return (DTOOBIG);
	}

	mutex_enter(&dc_head.dch_lock);
	mutex_enter(&dcap->dca_lock);

	if (dcap->dca_dircache == DC_RET_LOW_MEM) {
		dcap->dca_dircache = NULL;
		mutex_exit(&dcap->dca_lock);
		mutex_exit(&dc_head.dch_lock);
		return (DNOMEM);
	}

	/*
	 * Check if there's currently a cache.
	 * This probably only occurs on a race.
	 */
	if (dcap->dca_dircache != NULL) {
		mutex_exit(&dcap->dca_lock);
		mutex_exit(&dc_head.dch_lock);
		return (DNOCACHE);
	}

	/*
	 * Allocate the dircache struct, entry and free space hash tables.
	 * These tables are initially just one entry but dynamically resize
	 * when entries and free space are added or removed.
	 */
	if ((dcp = kmem_zalloc(sizeof (dircache_t), KM_NOSLEEP)) == NULL) {
		goto error;
	}
	if ((dcp->dc_namehash = kmem_zalloc(sizeof (dcentry_t *),
	    KM_NOSLEEP)) == NULL) {
		goto error;
	}
	if ((dcp->dc_freehash = kmem_zalloc(sizeof (dcfree_t *),
	    KM_NOSLEEP)) == NULL) {
		goto error;
	}

	dcp->dc_anchor = dcap; /* set back pointer to anchor */
	dcap->dca_dircache = dcp;

	/* add into head of global chain */
	dcp->dc_next = dc_head.dch_next;
	dcp->dc_prev = (dircache_t *)&dc_head;
	dcp->dc_next->dc_prev = dcp;
	dc_head.dch_next = dcp;

	mutex_exit(&dcap->dca_lock);
	mutex_exit(&dc_head.dch_lock);
	ncs.ncs_cur_dirs.value.ui64++;
	ncs.ncs_dirs_cached.value.ui64++;
	return (DOK);
error:
	if (dcp != NULL) {
		if (dcp->dc_namehash) {
			kmem_free(dcp->dc_namehash, sizeof (dcentry_t *));
		}
		kmem_free(dcp, sizeof (dircache_t));
	}
	/*
	 * Must also kmem_free dcp->dc_freehash if more error cases are added
	 */
	mutex_exit(&dcap->dca_lock);
	mutex_exit(&dc_head.dch_lock);
	ncs.ncs_dir_start_nm.value.ui64++;
	return (DNOCACHE);
}
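
/*
 * A sketch of the expected fill protocol (an editorial example, not
 * from this file): a filesystem that must scan a directory anyway can
 * populate a cache as a side effect of the scan:
 *
 *	switch (dnlc_dir_start(dcap, est_entries)) {
 *	case DOK:
 *		for each entry found by the scan:
 *			(void) dnlc_dir_add_entry(dcap, name, handle);
 *		for each free slot found by the scan:
 *			(void) dnlc_dir_add_space(dcap, len, handle);
 *		dnlc_dir_complete(dcap);
 *		break;
 *	case DNOMEM:	recently reclaimed for memory; back off
 *	case DTOOBIG:	directory not worth caching
 *	case DNOCACHE:	caching disabled, or directory too small
 *		break;
 *	}
 *
 * Strictly, the add calls can themselves return DNOCACHE or DTOOBIG
 * mid-scan, at which point the caller should stop adding.
 */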

/*
 * Add a directory entry to a partial or complete directory cache.
 */
dcret_t
dnlc_dir_add_entry(dcanchor_t *dcap, char *name, uint64_t handle)
{
	dircache_t *dcp;
	dcentry_t **hp, *dep;
	int hash;
	uint_t capacity;
	uchar_t namlen;

	/*
	 * Allocate the dcentry struct, including the variable
	 * size name. Note, the null terminator is not copied.
	 *
	 * We do this outside the lock to avoid possible deadlock if
	 * dnlc_dir_reclaim() is called as a result of memory shortage.
	 */
	DNLC_DIR_HASH(name, hash, namlen);
	dep = kmem_alloc(sizeof (dcentry_t) - 1 + namlen, KM_NOSLEEP);
	if (dep == NULL) {
#ifdef DEBUG
		/*
		 * The kmem allocator generates random failures for
		 * KM_NOSLEEP calls (see KMEM_RANDOM_ALLOCATION_FAILURE),
		 * so try again before we blow away a perfectly good cache.
		 * This is done not to cover an error but purely for
		 * performance running a debug kernel.
		 * This random error only occurs in debug mode.
		 */
		dep = kmem_alloc(sizeof (dcentry_t) - 1 + namlen, KM_NOSLEEP);
		if (dep != NULL)
			goto ok;
#endif
		ncs.ncs_dir_add_nm.value.ui64++;
		/*
		 * Free a directory cache. This may be the one we are
		 * called with.
		 */
		dnlc_dir_reclaim(NULL);
		dep = kmem_alloc(sizeof (dcentry_t) - 1 + namlen, KM_NOSLEEP);
		if (dep == NULL) {
			/*
			 * still no memory, better delete this cache
			 */
			mutex_enter(&dcap->dca_lock);
			dcp = (dircache_t *)dcap->dca_dircache;
			if (VALID_DIR_CACHE(dcp)) {
				dnlc_dir_abort(dcp);
				dcap->dca_dircache = DC_RET_LOW_MEM;
			}
			mutex_exit(&dcap->dca_lock);
			ncs.ncs_dir_addabort.value.ui64++;
			return (DNOCACHE);
		}
		/*
		 * fall through as if the 1st kmem_alloc had worked
		 */
	}
#ifdef DEBUG
ok:
#endif
	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		/*
		 * If the total number of entries goes above the max
		 * then free this cache
		 */
		if ((dcp->dc_num_entries + dcp->dc_num_free) >
		    dnlc_dir_max_size) {
			mutex_exit(&dcap->dca_lock);
			dnlc_dir_purge(dcap);
			kmem_free(dep, sizeof (dcentry_t) - 1 + namlen);
			ncs.ncs_dir_add_max.value.ui64++;
			return (DTOOBIG);
		}
		dcp->dc_num_entries++;
		capacity = (dcp->dc_nhash_mask + 1) << dnlc_dir_hash_size_shift;
		if (dcp->dc_num_entries >=
		    (capacity << dnlc_dir_hash_resize_shift)) {
			dnlc_dir_adjust_nhash(dcp);
		}
		hp = &dcp->dc_namehash[hash & dcp->dc_nhash_mask];

		/*
		 * Initialise and chain in new entry
		 */
		dep->de_handle = handle;
		dep->de_hash = hash;
		/*
		 * Note de_namelen is a uchar_t to conserve space
		 * and alignment padding. The max length of any
		 * pathname component is defined as MAXNAMELEN,
		 * which is 256 (including the terminating null).
		 * So provided this doesn't change, that we don't include
		 * the null, that we always use bcmp to compare strings,
		 * and that we don't start storing full names, we are ok.
		 * The space savings is worth it.
		 */
		dep->de_namelen = namlen;
		bcopy(name, dep->de_name, namlen);
		dep->de_next = *hp;
		*hp = dep;
		dcp->dc_actime = ddi_get_lbolt64();
		mutex_exit(&dcap->dca_lock);
		ncs.ncs_dir_num_ents.value.ui64++;
		return (DOK);
	} else {
		mutex_exit(&dcap->dca_lock);
		kmem_free(dep, sizeof (dcentry_t) - 1 + namlen);
		return (DNOCACHE);
	}
}

/*
 * Add free space to a partial or complete directory cache.
 */
dcret_t
dnlc_dir_add_space(dcanchor_t *dcap, uint_t len, uint64_t handle)
{
	dircache_t *dcp;
	dcfree_t *dfp, **hp;
	uint_t capacity;

	/*
	 * We kmem_alloc outside the lock to avoid possible deadlock if
	 * dnlc_dir_reclaim() is called as a result of memory shortage.
	 */
	dfp = kmem_cache_alloc(dnlc_dir_space_cache, KM_NOSLEEP);
	if (dfp == NULL) {
#ifdef DEBUG
		/*
		 * The kmem allocator generates random failures for
		 * KM_NOSLEEP calls (see KMEM_RANDOM_ALLOCATION_FAILURE),
		 * so try again before we blow away a perfectly good cache.
		 * This random error only occurs in debug mode.
		 */
		dfp = kmem_cache_alloc(dnlc_dir_space_cache, KM_NOSLEEP);
		if (dfp != NULL)
			goto ok;
#endif
		ncs.ncs_dir_add_nm.value.ui64++;
		/*
		 * Free a directory cache. This may be the one we are
		 * called with.
		 */
		dnlc_dir_reclaim(NULL);
		dfp = kmem_cache_alloc(dnlc_dir_space_cache, KM_NOSLEEP);
		if (dfp == NULL) {
			/*
			 * still no memory, better delete this cache
			 */
			mutex_enter(&dcap->dca_lock);
			dcp = (dircache_t *)dcap->dca_dircache;
			if (VALID_DIR_CACHE(dcp)) {
				dnlc_dir_abort(dcp);
				dcap->dca_dircache = DC_RET_LOW_MEM;
			}
			mutex_exit(&dcap->dca_lock);
			ncs.ncs_dir_addabort.value.ui64++;
			return (DNOCACHE);
		}
		/*
		 * fall through as if the 1st kmem_alloc had worked
		 */
	}

#ifdef DEBUG
ok:
#endif
	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		if ((dcp->dc_num_entries + dcp->dc_num_free) >
		    dnlc_dir_max_size) {
			mutex_exit(&dcap->dca_lock);
			dnlc_dir_purge(dcap);
			kmem_cache_free(dnlc_dir_space_cache, dfp);
			ncs.ncs_dir_add_max.value.ui64++;
			return (DTOOBIG);
		}
		dcp->dc_num_free++;
		capacity = (dcp->dc_fhash_mask + 1) << dnlc_dir_hash_size_shift;
		if (dcp->dc_num_free >=
		    (capacity << dnlc_dir_hash_resize_shift)) {
			dnlc_dir_adjust_fhash(dcp);
		}
		/*
		 * Initialise and chain a new entry
		 */
		dfp->df_handle = handle;
		dfp->df_len = len;
		dcp->dc_actime = ddi_get_lbolt64();
		hp = &(dcp->dc_freehash[DDFHASH(handle, dcp)]);
		dfp->df_next = *hp;
		*hp = dfp;
		mutex_exit(&dcap->dca_lock);
		ncs.ncs_dir_num_ents.value.ui64++;
		return (DOK);
	} else {
		mutex_exit(&dcap->dca_lock);
		kmem_cache_free(dnlc_dir_space_cache, dfp);
		return (DNOCACHE);
	}
}

/*
 * Mark a directory cache as complete.
 */
void
dnlc_dir_complete(dcanchor_t *dcap)
{
	dircache_t *dcp;

	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		dcp->dc_complete = B_TRUE;
	}
	mutex_exit(&dcap->dca_lock);
}

/*
 * Internal routine to delete a partial or full directory cache.
 * No additional locking needed.
 */
static void
dnlc_dir_abort(dircache_t *dcp)
{
	dcentry_t *dep, *nhp;
	dcfree_t *fep, *fhp;
	uint_t nhtsize = dcp->dc_nhash_mask + 1; /* name hash table size */
	uint_t fhtsize = dcp->dc_fhash_mask + 1; /* free hash table size */
	uint_t i;

	/*
	 * Free up the cached name entries and hash table
	 */
	for (i = 0; i < nhtsize; i++) { /* for each hash bucket */
		nhp = dcp->dc_namehash[i];
		while (nhp != NULL) { /* for each chained entry */
			dep = nhp->de_next;
			kmem_free(nhp, sizeof (dcentry_t) - 1 +
			    nhp->de_namelen);
			nhp = dep;
		}
	}
	kmem_free(dcp->dc_namehash, sizeof (dcentry_t *) * nhtsize);

	/*
	 * Free up the free space entries and hash table
	 */
	for (i = 0; i < fhtsize; i++) { /* for each hash bucket */
		fhp = dcp->dc_freehash[i];
		while (fhp != NULL) { /* for each chained entry */
			fep = fhp->df_next;
			kmem_cache_free(dnlc_dir_space_cache, fhp);
			fhp = fep;
		}
	}
	kmem_free(dcp->dc_freehash, sizeof (dcfree_t *) * fhtsize);

	/*
	 * Finally free the directory cache structure itself
	 */
	ncs.ncs_dir_num_ents.value.ui64 -= (dcp->dc_num_entries +
	    dcp->dc_num_free);
	kmem_free(dcp, sizeof (dircache_t));
	ncs.ncs_cur_dirs.value.ui64--;
}

/*
 * Remove a partial or complete directory cache
 */
void
dnlc_dir_purge(dcanchor_t *dcap)
{
	dircache_t *dcp;

	mutex_enter(&dc_head.dch_lock);
	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (!VALID_DIR_CACHE(dcp)) {
		mutex_exit(&dcap->dca_lock);
		mutex_exit(&dc_head.dch_lock);
		return;
	}
	dcap->dca_dircache = NULL;
	/*
	 * Unchain from global list
	 */
	dcp->dc_prev->dc_next = dcp->dc_next;
	dcp->dc_next->dc_prev = dcp->dc_prev;
	mutex_exit(&dcap->dca_lock);
	mutex_exit(&dc_head.dch_lock);
	dnlc_dir_abort(dcp);
}

/*
 * Remove an entry from a complete or partial directory cache.
 * Return the handle through handlep if handlep is non-NULL.
 */
dcret_t
dnlc_dir_rem_entry(dcanchor_t *dcap, char *name, uint64_t *handlep)
{
	dircache_t *dcp;
	dcentry_t **prevpp, *te;
	uint_t capacity;
	int hash;
	int ret;
	uchar_t namlen;

	if (!dnlc_dir_enable) {
		return (DNOCACHE);
	}

	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		dcp->dc_actime = ddi_get_lbolt64();
		if (dcp->dc_nhash_mask > 0) { /* ie not minimum */
			capacity = (dcp->dc_nhash_mask + 1) <<
			    dnlc_dir_hash_size_shift;
			if (dcp->dc_num_entries <=
			    (capacity >> dnlc_dir_hash_resize_shift)) {
				dnlc_dir_adjust_nhash(dcp);
			}
		}
		DNLC_DIR_HASH(name, hash, namlen);
		prevpp = &dcp->dc_namehash[hash & dcp->dc_nhash_mask];
		while (*prevpp != NULL) {
			if (((*prevpp)->de_hash == hash) &&
			    (namlen == (*prevpp)->de_namelen) &&
			    bcmp((*prevpp)->de_name, name, namlen) == 0) {
				if (handlep != NULL) {
					*handlep = (*prevpp)->de_handle;
				}
				te = *prevpp;
				*prevpp = (*prevpp)->de_next;
				kmem_free(te, sizeof (dcentry_t) - 1 +
				    te->de_namelen);

				/*
				 * If the total number of entries
				 * falls below half the minimum number
				 * of entries then free this cache.
				 */
				if (--dcp->dc_num_entries <
				    (dnlc_dir_min_size >> 1)) {
					mutex_exit(&dcap->dca_lock);
					dnlc_dir_purge(dcap);
				} else {
					mutex_exit(&dcap->dca_lock);
				}
				ncs.ncs_dir_num_ents.value.ui64--;
				return (DFOUND);
			}
			prevpp = &((*prevpp)->de_next);
		}
		if (dcp->dc_complete) {
			ncs.ncs_dir_reme_fai.value.ui64++;
			ret = DNOENT;
		} else {
			ret = DNOCACHE;
		}
		mutex_exit(&dcap->dca_lock);
		return (ret);
	} else {
		mutex_exit(&dcap->dca_lock);
		return (DNOCACHE);
	}
}


/*
 * Remove free space of at least the given length from a complete
 * or partial directory cache.
 */
dcret_t
dnlc_dir_rem_space_by_len(dcanchor_t *dcap, uint_t len, uint64_t *handlep)
{
	dircache_t *dcp;
	dcfree_t **prevpp, *tfp;
	uint_t fhtsize; /* free hash table size */
	uint_t i;
	uint_t capacity;
	int ret;

	if (!dnlc_dir_enable) {
		return (DNOCACHE);
	}

	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		dcp->dc_actime = ddi_get_lbolt64();
		if (dcp->dc_fhash_mask > 0) { /* ie not minimum */
			capacity = (dcp->dc_fhash_mask + 1) <<
			    dnlc_dir_hash_size_shift;
			if (dcp->dc_num_free <=
			    (capacity >> dnlc_dir_hash_resize_shift)) {
				dnlc_dir_adjust_fhash(dcp);
			}
		}
		/*
		 * Search for an entry of the appropriate size
		 * on a first fit basis.
		 */
		fhtsize = dcp->dc_fhash_mask + 1;
		for (i = 0; i < fhtsize; i++) { /* for each hash bucket */
			prevpp = &(dcp->dc_freehash[i]);
			while (*prevpp != NULL) {
				if ((*prevpp)->df_len >= len) {
					*handlep = (*prevpp)->df_handle;
					tfp = *prevpp;
					*prevpp = (*prevpp)->df_next;
					dcp->dc_num_free--;
					mutex_exit(&dcap->dca_lock);
					kmem_cache_free(dnlc_dir_space_cache,
					    tfp);
					ncs.ncs_dir_num_ents.value.ui64--;
					return (DFOUND);
				}
				prevpp = &((*prevpp)->df_next);
			}
		}
		if (dcp->dc_complete) {
			ret = DNOENT;
		} else {
			ret = DNOCACHE;
		}
		mutex_exit(&dcap->dca_lock);
		return (ret);
	} else {
		mutex_exit(&dcap->dca_lock);
		return (DNOCACHE);
	}
}

/*
 * Remove free space with the given handle from a complete or partial
 * directory cache.
 */
dcret_t
dnlc_dir_rem_space_by_handle(dcanchor_t *dcap, uint64_t handle)
{
	dircache_t *dcp;
	dcfree_t **prevpp, *tfp;
	uint_t capacity;
	int ret;

	if (!dnlc_dir_enable) {
		return (DNOCACHE);
	}

	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		dcp->dc_actime = ddi_get_lbolt64();
		if (dcp->dc_fhash_mask > 0) { /* ie not minimum */
			capacity = (dcp->dc_fhash_mask + 1) <<
			    dnlc_dir_hash_size_shift;
			if (dcp->dc_num_free <=
			    (capacity >> dnlc_dir_hash_resize_shift)) {
				dnlc_dir_adjust_fhash(dcp);
			}
		}

		/*
		 * search for the exact entry
		 */
		prevpp = &(dcp->dc_freehash[DDFHASH(handle, dcp)]);
		while (*prevpp != NULL) {
			if ((*prevpp)->df_handle == handle) {
				tfp = *prevpp;
				*prevpp = (*prevpp)->df_next;
				dcp->dc_num_free--;
				mutex_exit(&dcap->dca_lock);
				kmem_cache_free(dnlc_dir_space_cache, tfp);
				ncs.ncs_dir_num_ents.value.ui64--;
				return (DFOUND);
			}
			prevpp = &((*prevpp)->df_next);
		}
		if (dcp->dc_complete) {
			ncs.ncs_dir_rems_fai.value.ui64++;
			ret = DNOENT;
		} else {
			ret = DNOCACHE;
		}
		mutex_exit(&dcap->dca_lock);
		return (ret);
	} else {
		mutex_exit(&dcap->dca_lock);
		return (DNOCACHE);
	}
}

/*
 * Update the handle of a directory cache entry.
 */
dcret_t
dnlc_dir_update(dcanchor_t *dcap, char *name, uint64_t handle)
{
	dircache_t *dcp;
	dcentry_t *dep;
	int hash;
	int ret;
	uchar_t namlen;

	if (!dnlc_dir_enable) {
		return (DNOCACHE);
	}

	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		dcp->dc_actime = ddi_get_lbolt64();
		DNLC_DIR_HASH(name, hash, namlen);
		dep = dcp->dc_namehash[hash & dcp->dc_nhash_mask];
		while (dep != NULL) {
			if ((dep->de_hash == hash) &&
			    (namlen == dep->de_namelen) &&
			    bcmp(dep->de_name, name, namlen) == 0) {
				dep->de_handle = handle;
				mutex_exit(&dcap->dca_lock);
				return (DFOUND);
			}
			dep = dep->de_next;
		}
		if (dcp->dc_complete) {
			ncs.ncs_dir_upd_fail.value.ui64++;
			ret = DNOENT;
		} else {
			ret = DNOCACHE;
		}
		mutex_exit(&dcap->dca_lock);
		return (ret);
	} else {
		mutex_exit(&dcap->dca_lock);
		return (DNOCACHE);
	}
}

void
dnlc_dir_fini(dcanchor_t *dcap)
{
	dircache_t *dcp;

	mutex_enter(&dc_head.dch_lock);
	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		/*
		 * Unchain from global list
		 */
		ncs.ncs_dir_finipurg.value.ui64++;
		dcp->dc_prev->dc_next = dcp->dc_next;
		dcp->dc_next->dc_prev = dcp->dc_prev;
	} else {
		dcp = NULL;
	}
	dcap->dca_dircache = NULL;
	mutex_exit(&dcap->dca_lock);
	mutex_exit(&dc_head.dch_lock);
	mutex_destroy(&dcap->dca_lock);
	if (dcp) {
		dnlc_dir_abort(dcp);
	}
}

/*
 * Reclaim callback for dnlc directory caching.
 * Invoked by the kernel memory allocator when memory gets tight.
 * This is a pretty serious condition and can easily lead to system
 * hangs if not enough space is returned.
 *
 * Deciding which directory (or directories) to purge is tricky.
 * Purging everything is overkill, but purging just the oldest used
 * was found to lead to hangs. The largest cached directories use the
 * most memory, but take the most effort to rebuild, whereas the smaller
 * ones have little value and give back little space. So what to do?
 *
 * The current policy is to continue purging the oldest used directories
 * until at least dnlc_dir_min_reclaim directory entries have been purged.
 */
/*ARGSUSED*/
static void
dnlc_dir_reclaim(void *unused)
{
	dircache_t *dcp, *oldest;
	uint_t dirent_cnt = 0;

	mutex_enter(&dc_head.dch_lock);
	while (dirent_cnt < dnlc_dir_min_reclaim) {
		dcp = dc_head.dch_next;
		oldest = NULL;
		while (dcp != (dircache_t *)&dc_head) {
			if (oldest == NULL) {
				oldest = dcp;
			} else {
				if (dcp->dc_actime < oldest->dc_actime) {
					oldest = dcp;
				}
			}
			dcp = dcp->dc_next;
		}
		if (oldest == NULL) {
			/* nothing to delete */
			mutex_exit(&dc_head.dch_lock);
			return;
		}
		/*
		 * remove from directory chain and purge
		 */
		oldest->dc_prev->dc_next = oldest->dc_next;
		oldest->dc_next->dc_prev = oldest->dc_prev;
		mutex_enter(&oldest->dc_anchor->dca_lock);
		/*
		 * If this was the last entry then it must be too large.
		 * Mark it as such by saving a special dircache_t
		 * pointer (DC_RET_LOW_MEM) in the anchor. The error DNOMEM
		 * will be presented to the caller of dnlc_dir_start().
		 */
		if (oldest->dc_next == oldest->dc_prev) {
			oldest->dc_anchor->dca_dircache = DC_RET_LOW_MEM;
			ncs.ncs_dir_rec_last.value.ui64++;
		} else {
			oldest->dc_anchor->dca_dircache = NULL;
			ncs.ncs_dir_recl_any.value.ui64++;
		}
		mutex_exit(&oldest->dc_anchor->dca_lock);
		dirent_cnt += oldest->dc_num_entries;
		dnlc_dir_abort(oldest);
	}
	mutex_exit(&dc_head.dch_lock);
}

/*
 * Dynamically grow or shrink the size of the name hash table
 */
static void
dnlc_dir_adjust_nhash(dircache_t *dcp)
{
	dcentry_t **newhash, *dep, **nhp, *tep;
	uint_t newsize;
	uint_t oldsize;
	uint_t newsizemask;
	int i;

	/*
	 * Allocate new hash table
	 */
	newsize = dcp->dc_num_entries >> dnlc_dir_hash_size_shift;
	newhash = kmem_zalloc(sizeof (dcentry_t *) * newsize, KM_NOSLEEP);
	if (newhash == NULL) {
		/*
		 * System is short on memory, just return.
		 * Note, the old hash table is still usable.
		 * This return is unlikely to repeatedly occur, because
		 * either some other directory caches will be reclaimed
		 * due to memory shortage, thus freeing memory, or this
		 * directory cache will be reclaimed.
		 */
		return;
	}
	oldsize = dcp->dc_nhash_mask + 1;
	dcp->dc_nhash_mask = newsizemask = newsize - 1;

	/*
	 * Move entries from the old table to the new
	 */
	for (i = 0; i < oldsize; i++) { /* for each hash bucket */
		dep = dcp->dc_namehash[i];
		while (dep != NULL) { /* for each chained entry */
			tep = dep;
			dep = dep->de_next;
			nhp = &newhash[tep->de_hash & newsizemask];
			tep->de_next = *nhp;
			*nhp = tep;
		}
	}

	/*
	 * delete old hash table and set new one in place
	 */
	kmem_free(dcp->dc_namehash, sizeof (dcentry_t *) * oldsize);
	dcp->dc_namehash = newhash;
}

/*
 * Dynamically grow or shrink the size of the free space hash table
 */
static void
dnlc_dir_adjust_fhash(dircache_t *dcp)
{
	dcfree_t **newhash, *dfp, **nhp, *tfp;
	uint_t newsize;
	uint_t oldsize;
	int i;

	/*
	 * Allocate new hash table
	 */
	newsize = dcp->dc_num_free >> dnlc_dir_hash_size_shift;
	newhash = kmem_zalloc(sizeof (dcfree_t *) * newsize, KM_NOSLEEP);
	if (newhash == NULL) {
		/*
		 * System is short on memory, just return.
		 * Note, the old hash table is still usable.
		 * This return is unlikely to repeatedly occur, because
		 * either some other directory caches will be reclaimed
		 * due to memory shortage, thus freeing memory, or this
		 * directory cache will be reclaimed.
		 */
		return;
	}
	oldsize = dcp->dc_fhash_mask + 1;
	dcp->dc_fhash_mask = newsize - 1;

	/*
	 * Move entries from the old table to the new
	 */
	for (i = 0; i < oldsize; i++) { /* for each hash bucket */
		dfp = dcp->dc_freehash[i];
		while (dfp != NULL) { /* for each chained entry */
			tfp = dfp;
			dfp = dfp->df_next;
			nhp = &newhash[DDFHASH(tfp->df_handle, dcp)];
			tfp->df_next = *nhp;
			*nhp = tfp;
		}
	}

	/*
	 * delete old hash table and set new one in place
	 */
	kmem_free(dcp->dc_freehash, sizeof (dcfree_t *) * oldsize);
	dcp->dc_freehash = newhash;
}
2023