/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Joyent, Inc.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/dnlc.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/vtrace.h>
#include <sys/bitmap.h>
#include <sys/var.h>
#include <sys/sysmacros.h>
#include <sys/kstat.h>
#include <sys/atomic.h>
#include <sys/taskq.h>

/*
 * Directory name lookup cache.
 * Based on code originally done by Robert Elz at Melbourne.
 *
 * Names found by directory scans are retained in a cache
 * for future reference. Each hash chain is ordered by LRU.
 * The cache is indexed by the hash value obtained from (vp, name),
 * where vp refers to the directory containing the name.
 */

/*
 * We want to be able to identify files that are referenced only by the DNLC.
 * When adding a reference from the DNLC, call VN_HOLD_DNLC instead of VN_HOLD,
 * since multiple DNLC references should only be counted once in v_count. The
 * VN_HOLD macro itself is aliased to VN_HOLD_CALLER in this file to help
 * differentiate the behaviors. (Unfortunately it is not possible to #undef
 * VN_HOLD and retain VN_HOLD_CALLER. Ideally a Makefile rule would grep
 * uncommented C tokens to check that VN_HOLD is referenced only once in this
 * file, to define VN_HOLD_CALLER.)
 */
#define	VN_HOLD_CALLER	VN_HOLD
#define	VN_HOLD_DNLC(vp)	{	\
	mutex_enter(&(vp)->v_lock);	\
	if ((vp)->v_count_dnlc == 0)	\
		(vp)->v_count++;	\
	(vp)->v_count_dnlc++;	\
	mutex_exit(&(vp)->v_lock);	\
}
#define	VN_RELE_DNLC(vp)	{	\
	vn_rele_dnlc(vp);	\
}
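
/*
 * A sketch of how the two counts behave under these macros (assuming
 * that vn_rele_dnlc() drops the v_count hold when the last DNLC
 * reference goes away):
 *
 *	vnode_t *vp;		v_count == 1, v_count_dnlc == 0
 *	VN_HOLD_DNLC(vp);	v_count == 2, v_count_dnlc == 1
 *	VN_HOLD_DNLC(vp);	v_count == 2, v_count_dnlc == 2
 *	VN_RELE_DNLC(vp);	v_count == 2, v_count_dnlc == 1
 *	VN_RELE_DNLC(vp);	v_count == 1, v_count_dnlc == 0
 *
 * So however many cached names reference vp, the DNLC as a whole holds
 * exactly one v_count reference on it.
 */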

/*
 * Tunable nc_hashavelen is the average length desired for this chain, from
 * which the size of the nc_hash table is derived at create time.
 */
#define	NC_HASHAVELEN_DEFAULT	4
int nc_hashavelen = NC_HASHAVELEN_DEFAULT;

/*
 * NC_MOVETOFRONT is the move-to-front threshold: if the hash lookup
 * depth exceeds this value, we move the looked-up entry to the front of
 * its hash chain. The idea is to make sure that the most frequently
 * accessed entries are found most quickly (by keeping them near the
 * front of their hash chains).
 */
#define	NC_MOVETOFRONT	2

/*
 * DNLC_MAX_RELE is used to size an array on the stack when releasing
 * vnodes. This array is used rather than calling VN_RELE() inline because
 * all dnlc locks must be dropped by that time in order to avoid a
 * possible deadlock. This deadlock occurs when the dnlc holds the last
 * reference to the vnode and so the VOP_INACTIVE vector is called, which
 * can in turn call back into the dnlc. A global array was used previously
 * but had many problems:
 * 1) It had no real upper bound on the array size, as entries
 *    could be added after starting the purge.
 * 2) The locking scheme caused a hang.
 * 3) It caused serialisation on the global lock.
 * 4) The array was often unnecessarily huge.
 *
 * Note the current value of 8 allows up to 4 cache entries (to be purged
 * from each hash chain) before having to cycle around and retry.
 * This ought to be ample given that nc_hashavelen is typically very small.
 */
#define	DNLC_MAX_RELE	8 /* must be even */
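
/*
 * All of the purge routines below share the same two-phase shape to
 * honor that rule; a condensed sketch (stats and edge cases omitted):
 *
 *	vnode_t *nc_rele[DNLC_MAX_RELE];
 *	int index = 0;
 *
 *	mutex_enter(&nch->hash_lock);
 *	while (<more entries> && index < DNLC_MAX_RELE) {
 *		nc_rele[index++] = ncp->vp;	// defer the releases
 *		nc_rele[index++] = ncp->dp;
 *		nc_rmhash(ncp);
 *		dnlc_free(ncp);
 *	}
 *	mutex_exit(&nch->hash_lock);	// no dnlc locks held ...
 *	while (index)			// ... so VOP_INACTIVE may
 *		VN_RELE_DNLC(nc_rele[--index]);	// safely re-enter the dnlc
 */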

/*
 * Hash table of name cache entries for fast lookup, dynamically
 * allocated at startup.
 */
nc_hash_t *nc_hash;

/*
 * Rotors. Used to select entries on a round-robin basis.
 */
static nc_hash_t *dnlc_purge_fs1_rotor;
static nc_hash_t *dnlc_free_rotor;

/*
 * # of dnlc entries (uninitialized)
 *
 * the initial value was chosen as being
 * a random string of bits, probably not
 * normally chosen by a systems administrator
 */
int ncsize = -1;
volatile uint32_t dnlc_nentries = 0;	/* current num of name cache entries */
static int nc_hashsz;			/* size of hash table */
static int nc_hashmask;			/* size of hash table minus 1 */

/*
 * The dnlc_reduce_cache() taskq queue is activated when there are
 * ncsize name cache entries and, if no parameter is provided, it reduces
 * the size down to dnlc_nentries_low_water, which is by default one
 * hundredth less than (i.e. 99% of) ncsize.
 *
 * If a parameter is provided to dnlc_reduce_cache(), then we reduce
 * the size down based on ncsize_onepercent - where ncsize_onepercent
 * is 1% of ncsize; however, we never let dnlc_reduce_cache() reduce
 * the size below 3% of ncsize (ncsize_min_percent).
 */
#define	DNLC_LOW_WATER_DIVISOR_DEFAULT	100
uint_t dnlc_low_water_divisor = DNLC_LOW_WATER_DIVISOR_DEFAULT;
uint_t dnlc_nentries_low_water;
int dnlc_reduce_idle = 1;	/* no locking needed */
uint_t ncsize_onepercent;
uint_t ncsize_min_percent;
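
/*
 * Worked example: with ncsize = 100000 and the default divisor of 100,
 * dnlc_nentries_low_water = 100000 - 1000 = 99000, ncsize_onepercent =
 * 1000 and ncsize_min_percent = 3000. A dnlc_reduce_cache((void *)3)
 * call would then aim to free 3 * 1000 = 3000 entries, but would never
 * take dnlc_nentries below 3000.
 */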

/*
 * If dnlc_nentries hits dnlc_max_nentries (twice ncsize)
 * then this means the dnlc_reduce_cache() taskq is failing to
 * keep up. In this case we refuse to add new entries to the dnlc
 * until the taskq catches up.
 */
uint_t dnlc_max_nentries;	/* twice ncsize */
uint64_t dnlc_max_nentries_cnt = 0;	/* statistic on times we failed */

/*
 * Tunable to define when we should just remove items from
 * the end of the chain.
 */
#define	DNLC_LONG_CHAIN	8
uint_t dnlc_long_chain = DNLC_LONG_CHAIN;

/*
 * ncstats has been deprecated, due to the integer size of the counters
 * which can easily overflow in the dnlc.
 * It is maintained (at some expense) for compatibility.
 * The preferred interface is the kstat accessible nc_stats below.
 */
struct ncstats ncstats;

struct nc_stats ncs = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "negative_cache_hits",	KSTAT_DATA_UINT64 },
	{ "enters",			KSTAT_DATA_UINT64 },
	{ "double_enters",		KSTAT_DATA_UINT64 },
	{ "purge_total_entries",	KSTAT_DATA_UINT64 },
	{ "purge_all",			KSTAT_DATA_UINT64 },
	{ "purge_vp",			KSTAT_DATA_UINT64 },
	{ "purge_vfs",			KSTAT_DATA_UINT64 },
	{ "purge_fs1",			KSTAT_DATA_UINT64 },
	{ "pick_free",			KSTAT_DATA_UINT64 },
	{ "pick_heuristic",		KSTAT_DATA_UINT64 },
	{ "pick_last",			KSTAT_DATA_UINT64 },

	/* directory caching stats */

	{ "dir_hits",			KSTAT_DATA_UINT64 },
	{ "dir_misses",			KSTAT_DATA_UINT64 },
	{ "dir_cached_current",		KSTAT_DATA_UINT64 },
	{ "dir_entries_cached_current",	KSTAT_DATA_UINT64 },
	{ "dir_cached_total",		KSTAT_DATA_UINT64 },
	{ "dir_start_no_memory",	KSTAT_DATA_UINT64 },
	{ "dir_add_no_memory",		KSTAT_DATA_UINT64 },
	{ "dir_add_abort",		KSTAT_DATA_UINT64 },
	{ "dir_add_max",		KSTAT_DATA_UINT64 },
	{ "dir_remove_entry_fail",	KSTAT_DATA_UINT64 },
	{ "dir_remove_space_fail",	KSTAT_DATA_UINT64 },
	{ "dir_update_fail",		KSTAT_DATA_UINT64 },
	{ "dir_fini_purge",		KSTAT_DATA_UINT64 },
	{ "dir_reclaim_last",		KSTAT_DATA_UINT64 },
	{ "dir_reclaim_any",		KSTAT_DATA_UINT64 },
};

static int doingcache = 1;

vnode_t negative_cache_vnode;
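
/*
 * Negative caching works by entering this vnode, via the DNLC_NO_VNODE
 * macro from dnlc.h, in place of a real one. In sketch form, a
 * filesystem whose directory scan fails can do:
 *
 *	dnlc_enter(dvp, nm, DNLC_NO_VNODE);
 *
 * and a later dnlc_lookup(dvp, nm) that returns DNLC_NO_VNODE tells
 * the caller the name is known not to exist, without rescanning the
 * directory.
 */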

/*
 * Insert entry at the front of the queue
 */
#define	nc_inshash(ncp, hp)	\
{	\
	(ncp)->hash_next = (hp)->hash_next;	\
	(ncp)->hash_prev = (ncache_t *)(hp);	\
	(hp)->hash_next->hash_prev = (ncp);	\
	(hp)->hash_next = (ncp);	\
}

/*
 * Remove entry from hash queue
 */
#define	nc_rmhash(ncp)	\
{	\
	(ncp)->hash_prev->hash_next = (ncp)->hash_next;	\
	(ncp)->hash_next->hash_prev = (ncp)->hash_prev;	\
	(ncp)->hash_prev = NULL;	\
	(ncp)->hash_next = NULL;	\
}

/*
 * Free an entry.
 */
#define	dnlc_free(ncp)	\
{	\
	kmem_free((ncp), sizeof (ncache_t) + (ncp)->namlen);	\
	atomic_dec_32(&dnlc_nentries);	\
}


/*
 * Cached directory info.
 * ======================
 */

/*
 * Cached directory free space hash function.
 * Needs the free space handle and the dcp to get the hash table size
 * Returns the hash index.
 */
#define	DDFHASH(handle, dcp)	((handle >> 2) & (dcp)->dc_fhash_mask)

/*
 * Cached directory name entry hash function.
 * Uses the name and returns in the input arguments the hash and the name
 * length.
 */
#define	DNLC_DIR_HASH(name, hash, namelen)	\
	{	\
		char Xc;	\
		const char *Xcp;	\
		hash = *name;	\
		for (Xcp = (name + 1); (Xc = *Xcp) != 0; Xcp++)	\
			hash = (hash << 4) + hash + Xc;	\
		ASSERT((Xcp - (name)) <= ((1 << NBBY) - 1));	\
		namelen = Xcp - (name);	\
	}
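
/*
 * Written as a plain function, the macro above computes the simple
 * multiplicative hash h = h * 17 + c over the name; a user-level
 * sketch for reference (dir_hash is illustrative, not a kernel
 * interface):
 *
 *	static uint_t
 *	dir_hash(const char *name, uchar_t *namelenp)
 *	{
 *		const char *cp = name;
 *		uint_t hash = *cp++;
 *
 *		while (*cp != '\0')
 *			hash = (hash << 4) + hash + *cp++;
 *		*namelenp = (uchar_t)(cp - name);
 *		return (hash);
 *	}
 */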

/*
 * The anchor directory cache pointer can contain 3 types of values:
 * 1) NULL: No directory cache
 * 2) DC_RET_LOW_MEM (-1): There was a directory cache that was found to
 *    be too big or a memory shortage occurred. This special value remains
 *    in the pointer until a dnlc_dir_start(), which returns a DNOMEM
 *    error. This is kludgy but efficient and only visible in this source
 *    file.
 * 3) A valid cache pointer.
 */
#define	DC_RET_LOW_MEM	(dircache_t *)1
#define	VALID_DIR_CACHE(dcp)	((dircache_t *)(dcp) > DC_RET_LOW_MEM)

/* Tunables */
uint_t dnlc_dir_enable = 1;	/* disable caching directories by setting to 0 */
uint_t dnlc_dir_min_size = 40;	/* min no of directory entries before caching */
uint_t dnlc_dir_max_size = UINT_MAX;	/* ditto maximum */
uint_t dnlc_dir_hash_size_shift = 3;	/* 8 entries per hash bucket */
uint_t dnlc_dir_min_reclaim = 350000;	/* approx 1MB of dcentry_t's */
/*
 * dnlc_dir_hash_resize_shift determines when the hash tables
 * get re-adjusted due to growth or shrinkage
 * - currently 2, indicating that there can be at most 4
 * times or at least one quarter the number of entries
 * before hash table readjustment. Note that with
 * dnlc_dir_hash_size_shift above set at 3 this would
 * mean readjustment would occur if the average number
 * of entries went above 32 or below 2
 */
uint_t dnlc_dir_hash_resize_shift = 2;	/* readjust rate */

static kmem_cache_t *dnlc_dir_space_cache;	/* free space entry cache */
static dchead_t dc_head;	/* anchor of cached directories */

/* Prototypes */
static ncache_t *dnlc_get(uchar_t namlen);
static ncache_t *dnlc_search(vnode_t *dp, const char *name, uchar_t namlen,
    int hash);
static void dnlc_dir_reclaim(void *unused);
static void dnlc_dir_abort(dircache_t *dcp);
static void dnlc_dir_adjust_fhash(dircache_t *dcp);
static void dnlc_dir_adjust_nhash(dircache_t *dcp);
static void do_dnlc_reduce_cache(void *);

/*
 * Initialize the directory cache.
 */
void
dnlc_init()
{
	nc_hash_t *hp;
	kstat_t *ksp;
	int i;

	/*
	 * Set up the size of the dnlc (ncsize) and its low water mark.
	 */
	if (ncsize == -1) {
		/* calculate a reasonable size for the low water */
		dnlc_nentries_low_water = 4 * (v.v_proc + maxusers) + 320;
		ncsize = dnlc_nentries_low_water +
		    (dnlc_nentries_low_water / dnlc_low_water_divisor);
	} else {
		/* don't change the user specified ncsize */
		dnlc_nentries_low_water =
		    ncsize - (ncsize / dnlc_low_water_divisor);
	}
	if (ncsize <= 0) {
		doingcache = 0;
		dnlc_dir_enable = 0; /* also disable directory caching */
		ncsize = 0;
		cmn_err(CE_NOTE, "name cache (dnlc) disabled");
		return;
	}
	dnlc_max_nentries = ncsize * 2;
	ncsize_onepercent = ncsize / 100;
	ncsize_min_percent = ncsize_onepercent * 3;

	/*
	 * Initialise the hash table.
	 * Compute hash size rounding to the next power of two.
	 */
	nc_hashsz = ncsize / nc_hashavelen;
	nc_hashsz = 1 << highbit(nc_hashsz);
	nc_hashmask = nc_hashsz - 1;
	nc_hash = kmem_zalloc(nc_hashsz * sizeof (*nc_hash), KM_SLEEP);
	for (i = 0; i < nc_hashsz; i++) {
		hp = (nc_hash_t *)&nc_hash[i];
		mutex_init(&hp->hash_lock, NULL, MUTEX_DEFAULT, NULL);
		hp->hash_next = (ncache_t *)hp;
		hp->hash_prev = (ncache_t *)hp;
	}

	/*
	 * Initialize rotors
	 */
	dnlc_free_rotor = dnlc_purge_fs1_rotor = &nc_hash[0];

	/*
	 * Set up the directory caching to use kmem_cache_alloc
	 * for its free space entries so that we can get a callback
	 * when the system is short on memory, to allow us to free
	 * up some memory. We don't use the constructor/destructor
	 * functions.
	 */
	dnlc_dir_space_cache = kmem_cache_create("dnlc_space_cache",
	    sizeof (dcfree_t), 0, NULL, NULL, dnlc_dir_reclaim, NULL,
	    NULL, 0);

	/*
	 * Initialise the head of the cached directory structures
	 */
	mutex_init(&dc_head.dch_lock, NULL, MUTEX_DEFAULT, NULL);
	dc_head.dch_next = (dircache_t *)&dc_head;
	dc_head.dch_prev = (dircache_t *)&dc_head;

	/*
	 * Initialise the reference count of the negative cache vnode to 1
	 * so that it never goes away (VOP_INACTIVE isn't called on it).
	 */
	negative_cache_vnode.v_count = 1;
	negative_cache_vnode.v_count_dnlc = 0;

	/*
	 * Initialise kstats - both the old compatibility raw kind and
	 * the more extensive named stats.
	 */
	ksp = kstat_create("unix", 0, "ncstats", "misc", KSTAT_TYPE_RAW,
	    sizeof (struct ncstats), KSTAT_FLAG_VIRTUAL);
	if (ksp) {
		ksp->ks_data = (void *) &ncstats;
		kstat_install(ksp);
	}
	ksp = kstat_create("unix", 0, "dnlcstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (ncs) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (ksp) {
		ksp->ks_data = (void *) &ncs;
		kstat_install(ksp);
	}
}

/*
 * Add a name to the directory cache.
 */
void
dnlc_enter(vnode_t *dp, const char *name, vnode_t *vp)
{
	ncache_t *ncp;
	nc_hash_t *hp;
	uchar_t namlen;
	int hash;

	TRACE_0(TR_FAC_NFS, TR_DNLC_ENTER_START, "dnlc_enter_start:");

	if (!doingcache) {
		TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
		    "dnlc_enter_end:(%S) %d", "not caching", 0);
		return;
	}

	/*
	 * Get a new dnlc entry. Assume the entry won't be in the cache
	 * and initialize it now
	 */
	DNLCHASH(name, dp, hash, namlen);
	if ((ncp = dnlc_get(namlen)) == NULL)
		return;
	ncp->dp = dp;
	VN_HOLD_DNLC(dp);
	ncp->vp = vp;
	VN_HOLD_DNLC(vp);
	bcopy(name, ncp->name, namlen + 1); /* name and null */
	ncp->hash = hash;
	hp = &nc_hash[hash & nc_hashmask];

	mutex_enter(&hp->hash_lock);
	if (dnlc_search(dp, name, namlen, hash) != NULL) {
		mutex_exit(&hp->hash_lock);
		ncstats.dbl_enters++;
		ncs.ncs_dbl_enters.value.ui64++;
		VN_RELE_DNLC(dp);
		VN_RELE_DNLC(vp);
		dnlc_free(ncp);		/* double enter; discard new entry */
		TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
		    "dnlc_enter_end:(%S) %d", "dbl enter", ncstats.dbl_enters);
		return;
	}
	/*
	 * Insert back into the hash chain.
	 */
	nc_inshash(ncp, hp);
	mutex_exit(&hp->hash_lock);
	ncstats.enters++;
	ncs.ncs_enters.value.ui64++;
	TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
	    "dnlc_enter_end:(%S) %d", "done", ncstats.enters);
}
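
/*
 * Typical use, in sketch form: a filesystem's VOP_LOOKUP caches each
 * name it resolves so the next lookup can be satisfied from the hash
 * table (myfs_dirscan is a hypothetical on-disk lookup):
 *
 *	error = myfs_dirscan(dvp, nm, &vp);
 *	if (error == 0)
 *		dnlc_enter(dvp, nm, vp);		// positive entry
 *	else if (error == ENOENT)
 *		dnlc_enter(dvp, nm, DNLC_NO_VNODE);	// negative entry
 */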

/*
 * Add a name to the directory cache.
 *
 * This function is basically identical to
 * dnlc_enter(). The difference is that when the
 * desired dnlc entry is found, the vnode in the
 * ncache is compared with the vnode passed in.
 *
 * If they are not equal then the ncache is
 * updated with the passed in vnode. Otherwise
 * it just frees up the newly allocated dnlc entry.
 */
void
dnlc_update(vnode_t *dp, const char *name, vnode_t *vp)
{
	ncache_t *ncp;
	ncache_t *tcp;
	vnode_t *tvp;
	nc_hash_t *hp;
	int hash;
	uchar_t namlen;

	TRACE_0(TR_FAC_NFS, TR_DNLC_ENTER_START, "dnlc_update_start:");

	if (!doingcache) {
		TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
		    "dnlc_update_end:(%S) %d", "not caching", 0);
		return;
	}

	/*
	 * Get a new dnlc entry and initialize it now.
	 * If we fail to get a new entry, call dnlc_remove() to purge
	 * any existing dnlc entry including negative cache (DNLC_NO_VNODE)
	 * entry.
	 * Failure to clear an existing entry could result in false dnlc
	 * lookup (negative/stale entry).
	 */
	DNLCHASH(name, dp, hash, namlen);
	if ((ncp = dnlc_get(namlen)) == NULL) {
		dnlc_remove(dp, name);
		return;
	}
	ncp->dp = dp;
	VN_HOLD_DNLC(dp);
	ncp->vp = vp;
	VN_HOLD_DNLC(vp);
	bcopy(name, ncp->name, namlen + 1); /* name and null */
	ncp->hash = hash;
	hp = &nc_hash[hash & nc_hashmask];

	mutex_enter(&hp->hash_lock);
	if ((tcp = dnlc_search(dp, name, namlen, hash)) != NULL) {
		if (tcp->vp != vp) {
			tvp = tcp->vp;
			tcp->vp = vp;
			mutex_exit(&hp->hash_lock);
			VN_RELE_DNLC(tvp);
			ncstats.enters++;
			ncs.ncs_enters.value.ui64++;
			TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
			    "dnlc_update_end:(%S) %d", "done", ncstats.enters);
		} else {
			mutex_exit(&hp->hash_lock);
			VN_RELE_DNLC(vp);
			ncstats.dbl_enters++;
			ncs.ncs_dbl_enters.value.ui64++;
			TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
			    "dnlc_update_end:(%S) %d",
			    "dbl enter", ncstats.dbl_enters);
		}
		VN_RELE_DNLC(dp);
		dnlc_free(ncp);		/* entry already existed; discard */
		return;
	}
	/*
	 * insert the new entry, since it is not in dnlc yet
	 */
	nc_inshash(ncp, hp);
	mutex_exit(&hp->hash_lock);
	ncstats.enters++;
	ncs.ncs_enters.value.ui64++;
	TRACE_2(TR_FAC_NFS, TR_DNLC_ENTER_END,
	    "dnlc_update_end:(%S) %d", "done", ncstats.enters);
}

/*
 * Look up a name in the directory name cache.
 *
 * Return a doubly-held vnode if found: one hold so that it may
 * remain in the cache for other users, the other hold so that
 * the cache entry is not recycled and the identity of the vnode
 * lost before the caller can use the vnode.
 */
vnode_t *
dnlc_lookup(vnode_t *dp, const char *name)
{
	ncache_t *ncp;
	nc_hash_t *hp;
	vnode_t *vp;
	int hash, depth;
	uchar_t namlen;

	TRACE_2(TR_FAC_NFS, TR_DNLC_LOOKUP_START,
	    "dnlc_lookup_start:dp %x name %s", dp, name);

	if (!doingcache) {
		TRACE_4(TR_FAC_NFS, TR_DNLC_LOOKUP_END,
		    "dnlc_lookup_end:%S %d vp %x name %s",
		    "not_caching", 0, NULL, name);
		return (NULL);
	}

	DNLCHASH(name, dp, hash, namlen);
	depth = 1;
	hp = &nc_hash[hash & nc_hashmask];
	mutex_enter(&hp->hash_lock);

	for (ncp = hp->hash_next; ncp != (ncache_t *)hp;
	    ncp = ncp->hash_next) {
		if (ncp->hash == hash &&	/* fast signature check */
		    ncp->dp == dp &&
		    ncp->namlen == namlen &&
		    bcmp(ncp->name, name, namlen) == 0) {
			/*
			 * Move this entry to the head of its hash chain
			 * if it's not already close.
			 */
			if (depth > NC_MOVETOFRONT) {
				ncache_t *next = ncp->hash_next;
				ncache_t *prev = ncp->hash_prev;

				prev->hash_next = next;
				next->hash_prev = prev;
				ncp->hash_next = next = hp->hash_next;
				ncp->hash_prev = (ncache_t *)hp;
				next->hash_prev = ncp;
				hp->hash_next = ncp;

				ncstats.move_to_front++;
			}

			/*
			 * Put a hold on the vnode now so its identity
			 * can't change before the caller has a chance to
			 * put a hold on it.
			 */
			vp = ncp->vp;
			VN_HOLD_CALLER(vp);
			mutex_exit(&hp->hash_lock);
			ncstats.hits++;
			ncs.ncs_hits.value.ui64++;
			if (vp == DNLC_NO_VNODE) {
				ncs.ncs_neg_hits.value.ui64++;
			}
			TRACE_4(TR_FAC_NFS, TR_DNLC_LOOKUP_END,
			    "dnlc_lookup_end:%S %d vp %x name %s", "hit",
			    ncstats.hits, vp, name);
			return (vp);
		}
		depth++;
	}

	mutex_exit(&hp->hash_lock);
	ncstats.misses++;
	ncs.ncs_misses.value.ui64++;
	TRACE_4(TR_FAC_NFS, TR_DNLC_LOOKUP_END,
	    "dnlc_lookup_end:%S %d vp %x name %s", "miss", ncstats.misses,
	    NULL, name);
	return (NULL);
}
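
/*
 * Typical use, in sketch form (myfs_dirscan is hypothetical): consult
 * the cache first and fall back to a directory scan on a miss. Note
 * the check for a negative entry, and that the extra hold taken above
 * is the caller's to release.
 *
 *	vp = dnlc_lookup(dvp, nm);
 *	if (vp == DNLC_NO_VNODE) {		// cached "does not exist"
 *		VN_RELE(vp);
 *		return (ENOENT);
 *	}
 *	if (vp != NULL) {			// hit; vp already held
 *		*vpp = vp;
 *		return (0);
 *	}
 *	return (myfs_dirscan(dvp, nm, vpp));	// miss; go to disk
 */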

/*
 * Remove an entry in the directory name cache.
 */
void
dnlc_remove(vnode_t *dp, const char *name)
{
	ncache_t *ncp;
	nc_hash_t *hp;
	uchar_t namlen;
	int hash;

	if (!doingcache)
		return;
	DNLCHASH(name, dp, hash, namlen);
	hp = &nc_hash[hash & nc_hashmask];

	mutex_enter(&hp->hash_lock);
	if (ncp = dnlc_search(dp, name, namlen, hash)) {
		/*
		 * Free up the entry
		 */
		nc_rmhash(ncp);
		mutex_exit(&hp->hash_lock);
		VN_RELE_DNLC(ncp->vp);
		VN_RELE_DNLC(ncp->dp);
		dnlc_free(ncp);
		return;
	}
	mutex_exit(&hp->hash_lock);
}

/*
 * Purge the entire cache.
 */
void
dnlc_purge()
{
	nc_hash_t *nch;
	ncache_t *ncp;
	int index;
	int i;
	vnode_t *nc_rele[DNLC_MAX_RELE];

	if (!doingcache)
		return;

	ncstats.purges++;
	ncs.ncs_purge_all.value.ui64++;

	for (nch = nc_hash; nch < &nc_hash[nc_hashsz]; nch++) {
		index = 0;
		mutex_enter(&nch->hash_lock);
		ncp = nch->hash_next;
		while (ncp != (ncache_t *)nch) {
			ncache_t *np;

			np = ncp->hash_next;
			nc_rele[index++] = ncp->vp;
			nc_rele[index++] = ncp->dp;

			nc_rmhash(ncp);
			dnlc_free(ncp);
			ncp = np;
			ncs.ncs_purge_total.value.ui64++;
			if (index == DNLC_MAX_RELE)
				break;
		}
		mutex_exit(&nch->hash_lock);

		/* Release holds on all the vnodes now that we have no locks */
		for (i = 0; i < index; i++) {
			VN_RELE_DNLC(nc_rele[i]);
		}
		if (ncp != (ncache_t *)nch) {
			nch--; /* Do current hash chain again */
		}
	}
}

/*
 * Purge any cache entries referencing a vnode. Exit as soon as the dnlc
 * reference count goes to zero (the caller still holds a reference).
 */
void
dnlc_purge_vp(vnode_t *vp)
{
	nc_hash_t *nch;
	ncache_t *ncp;
	int index;
	vnode_t *nc_rele[DNLC_MAX_RELE];

	ASSERT(vp->v_count > 0);
	if (vp->v_count_dnlc == 0) {
		return;
	}

	if (!doingcache)
		return;

	ncstats.purges++;
	ncs.ncs_purge_vp.value.ui64++;

	for (nch = nc_hash; nch < &nc_hash[nc_hashsz]; nch++) {
		index = 0;
		mutex_enter(&nch->hash_lock);
		ncp = nch->hash_next;
		while (ncp != (ncache_t *)nch) {
			ncache_t *np;

			np = ncp->hash_next;
			if (ncp->dp == vp || ncp->vp == vp) {
				nc_rele[index++] = ncp->vp;
				nc_rele[index++] = ncp->dp;
				nc_rmhash(ncp);
				dnlc_free(ncp);
				ncs.ncs_purge_total.value.ui64++;
				if (index == DNLC_MAX_RELE) {
					ncp = np;
					break;
				}
			}
			ncp = np;
		}
		mutex_exit(&nch->hash_lock);

		/* Release holds on all the vnodes now that we have no locks */
		while (index) {
			VN_RELE_DNLC(nc_rele[--index]);
		}

		if (vp->v_count_dnlc == 0) {
			return;
		}

		if (ncp != (ncache_t *)nch) {
			nch--; /* Do current hash chain again */
		}
	}
}

/*
 * Purge cache entries referencing a vfsp. Caller supplies a count
 * of entries to purge; up to that many will be freed. A count of
 * zero indicates that all such entries should be purged. Returns
 * the number of entries that were purged.
 */
int
dnlc_purge_vfsp(vfs_t *vfsp, int count)
{
	nc_hash_t *nch;
	ncache_t *ncp;
	int n = 0;
	int index;
	int i;
	vnode_t *nc_rele[DNLC_MAX_RELE];

	if (!doingcache)
		return (0);

	ncstats.purges++;
	ncs.ncs_purge_vfs.value.ui64++;

	for (nch = nc_hash; nch < &nc_hash[nc_hashsz]; nch++) {
		index = 0;
		mutex_enter(&nch->hash_lock);
		ncp = nch->hash_next;
		while (ncp != (ncache_t *)nch) {
			ncache_t *np;

			np = ncp->hash_next;
			ASSERT(ncp->dp != NULL);
			ASSERT(ncp->vp != NULL);
			if ((ncp->dp->v_vfsp == vfsp) ||
			    (ncp->vp->v_vfsp == vfsp)) {
				n++;
				nc_rele[index++] = ncp->vp;
				nc_rele[index++] = ncp->dp;
				nc_rmhash(ncp);
				dnlc_free(ncp);
				ncs.ncs_purge_total.value.ui64++;
				if (index == DNLC_MAX_RELE) {
					ncp = np;
					break;
				}
				if (count != 0 && n >= count) {
					break;
				}
			}
			ncp = np;
		}
		mutex_exit(&nch->hash_lock);
		/* Release holds on all the vnodes now that we have no locks */
		for (i = 0; i < index; i++) {
			VN_RELE_DNLC(nc_rele[i]);
		}
		if (count != 0 && n >= count) {
			return (n);
		}
		if (ncp != (ncache_t *)nch) {
			nch--; /* Do current hash chain again */
		}
	}
	return (n);
}

/*
 * Purge 1 entry from the dnlc that is part of the filesystem(s)
 * represented by 'vop'. The purpose of this routine is to allow
 * users of the dnlc to free a vnode that is being held by the dnlc.
 *
 * If we find a vnode that we release which will result in
 * freeing the underlying vnode (count was 1), return 1; return 0
 * if no appropriate vnodes are found.
 *
 * Note, vop is not the 'right' identifier for a filesystem.
 */
int
dnlc_fs_purge1(vnodeops_t *vop)
{
	nc_hash_t *end;
	nc_hash_t *hp;
	ncache_t *ncp;
	vnode_t *vp;

	if (!doingcache)
		return (0);

	ncs.ncs_purge_fs1.value.ui64++;

	/*
	 * Scan the dnlc entries looking for a likely candidate.
	 */
	hp = end = dnlc_purge_fs1_rotor;

	do {
		if (++hp == &nc_hash[nc_hashsz])
			hp = nc_hash;
		dnlc_purge_fs1_rotor = hp;
		if (hp->hash_next == (ncache_t *)hp)
			continue;
		mutex_enter(&hp->hash_lock);
		for (ncp = hp->hash_prev;
		    ncp != (ncache_t *)hp;
		    ncp = ncp->hash_prev) {
			vp = ncp->vp;
			if (!vn_has_cached_data(vp) && (vp->v_count == 1) &&
			    vn_matchops(vp, vop))
				break;
		}
		if (ncp != (ncache_t *)hp) {
			nc_rmhash(ncp);
			mutex_exit(&hp->hash_lock);
			VN_RELE_DNLC(ncp->dp);
			VN_RELE_DNLC(vp);
			dnlc_free(ncp);
			ncs.ncs_purge_total.value.ui64++;
			return (1);
		}
		mutex_exit(&hp->hash_lock);
	} while (hp != end);
	return (0);
}
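
/*
 * Sketch of intended use (myfs_vnodeops is illustrative): a filesystem
 * that has run out of inodes can ask the dnlc to drop an idle vnode
 * that only the cache is keeping alive, so VOP_INACTIVE can free it:
 *
 *	if (dnlc_fs_purge1(myfs_vnodeops))
 *		return;		// a vnode was released; retry allocation
 */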

/*
 * Utility routine to search for a cache entry. Return the
 * ncache entry if found, NULL otherwise.
 */
static ncache_t *
dnlc_search(vnode_t *dp, const char *name, uchar_t namlen, int hash)
{
	nc_hash_t *hp;
	ncache_t *ncp;

	hp = &nc_hash[hash & nc_hashmask];

	for (ncp = hp->hash_next; ncp != (ncache_t *)hp; ncp = ncp->hash_next) {
		if (ncp->hash == hash &&
		    ncp->dp == dp &&
		    ncp->namlen == namlen &&
		    bcmp(ncp->name, name, namlen) == 0)
			return (ncp);
	}
	return (NULL);
}

#if ((1 << NBBY) - 1) < (MAXNAMELEN - 1)
#error ncache_t name length representation is too small
#endif

void
dnlc_reduce_cache(void *reduce_percent)
{
	if (dnlc_reduce_idle && (dnlc_nentries >= ncsize || reduce_percent)) {
		dnlc_reduce_idle = 0;
		if ((taskq_dispatch(system_taskq, do_dnlc_reduce_cache,
		    reduce_percent, TQ_NOSLEEP)) == NULL)
			dnlc_reduce_idle = 1;
	}
}

/*
 * Get a new name cache entry.
 * If the dnlc_reduce_cache() taskq isn't keeping up with demand, or memory
 * is short, then just return NULL. If we're over ncsize then kick off a
 * thread to free some in-use entries down to dnlc_nentries_low_water.
 * Caller must initialise all fields except namlen.
 * Component names are defined to be less than MAXNAMELEN,
 * which includes a null.
 */
static ncache_t *
dnlc_get(uchar_t namlen)
{
	ncache_t *ncp;

	if (dnlc_nentries > dnlc_max_nentries) {
		dnlc_max_nentries_cnt++; /* keep a statistic */
		return (NULL);
	}
	ncp = kmem_alloc(sizeof (ncache_t) + namlen, KM_NOSLEEP);
	if (ncp == NULL) {
		return (NULL);
	}
	ncp->namlen = namlen;
	atomic_inc_32(&dnlc_nentries);
	dnlc_reduce_cache(NULL);
	return (ncp);
}

/*
 * Taskq routine to free up name cache entries to reduce the
 * cache size to the low water mark if "reduce_percent" is not provided.
 * If "reduce_percent" is provided, reduce cache size by
 * (ncsize_onepercent * reduce_percent).
 */
/*ARGSUSED*/
static void
do_dnlc_reduce_cache(void *reduce_percent)
{
	nc_hash_t *hp = dnlc_free_rotor, *start_hp = hp;
	vnode_t *vp;
	ncache_t *ncp;
	int cnt;
	uint_t low_water = dnlc_nentries_low_water;

	if (reduce_percent) {
		uint_t reduce_cnt;

		/*
		 * Never try to reduce the current number
		 * of cache entries below 3% of ncsize.
		 */
		if (dnlc_nentries <= ncsize_min_percent) {
			dnlc_reduce_idle = 1;
			return;
		}
		reduce_cnt = ncsize_onepercent *
		    (uint_t)(uintptr_t)reduce_percent;

		if (reduce_cnt > dnlc_nentries ||
		    dnlc_nentries - reduce_cnt < ncsize_min_percent)
			low_water = ncsize_min_percent;
		else
			low_water = dnlc_nentries - reduce_cnt;
	}

	do {
		/*
		 * Find the first non-empty hash queue without locking.
		 * Only look at each hash queue once to avoid an infinite loop.
		 */
		do {
			if (++hp == &nc_hash[nc_hashsz])
				hp = nc_hash;
		} while (hp->hash_next == (ncache_t *)hp && hp != start_hp);

		/* return if all hash queues are empty. */
		if (hp->hash_next == (ncache_t *)hp) {
			dnlc_reduce_idle = 1;
			return;
		}

		mutex_enter(&hp->hash_lock);
		for (cnt = 0, ncp = hp->hash_prev; ncp != (ncache_t *)hp;
		    ncp = ncp->hash_prev, cnt++) {
			vp = ncp->vp;
			/*
			 * A name cache entry with a reference count
			 * of one is only referenced by the dnlc.
			 * Also negative cache entries are purged first.
			 */
			if (!vn_has_cached_data(vp) &&
			    ((vp->v_count == 1) || (vp == DNLC_NO_VNODE))) {
				ncs.ncs_pick_heur.value.ui64++;
				goto found;
			}
			/*
			 * Remove from the end of the chain if the
			 * chain is too long
			 */
			if (cnt > dnlc_long_chain) {
				ncp = hp->hash_prev;
				ncs.ncs_pick_last.value.ui64++;
				vp = ncp->vp;
				goto found;
			}
		}
		/* check for race and continue */
		if (hp->hash_next == (ncache_t *)hp) {
			mutex_exit(&hp->hash_lock);
			continue;
		}

		ncp = hp->hash_prev; /* pick the last one in the hash queue */
		ncs.ncs_pick_last.value.ui64++;
		vp = ncp->vp;
found:
		/*
		 * Remove from hash chain.
		 */
		nc_rmhash(ncp);
		mutex_exit(&hp->hash_lock);
		VN_RELE_DNLC(vp);
		VN_RELE_DNLC(ncp->dp);
		dnlc_free(ncp);
	} while (dnlc_nentries > low_water);

	dnlc_free_rotor = hp;
	dnlc_reduce_idle = 1;
}

/*
 * Directory caching routines
 * ==========================
 *
 * See dnlc.h for details of the interfaces below.
 */
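
/*
 * A condensed sketch of the intended calling sequence; the dcanchor_t
 * is typically embedded in the filesystem's per-inode data, and all
 * names other than the dnlc_dir_* interfaces are illustrative:
 *
 *	uint64_t handle;	// e.g. the entry's offset in the directory
 *
 *	switch (dnlc_dir_lookup(dcap, nm, &handle)) {
 *	case DFOUND:		// use handle to go straight to the entry
 *	case DNOENT:		// complete cache: name definitely absent
 *		break;
 *	case DNOCACHE:		// scan the directory, building a cache
 *		if (dnlc_dir_start(dcap, est_entries) == DOK) {
 *			// for each name and free slot found while scanning:
 *			(void) dnlc_dir_add_entry(dcap, name, handle);
 *			(void) dnlc_dir_add_space(dcap, len, handle);
 *			dnlc_dir_complete(dcap); // cache is now authoritative
 *		}
 *		break;
 *	}
 */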

/*
 * Look up an entry in a complete or partial directory cache.
 */
dcret_t
dnlc_dir_lookup(dcanchor_t *dcap, const char *name, uint64_t *handle)
{
	dircache_t *dcp;
	dcentry_t *dep;
	int hash;
	int ret;
	uchar_t namlen;

	/*
	 * can test without lock as we are only a cache
	 */
	if (!VALID_DIR_CACHE(dcap->dca_dircache)) {
		ncs.ncs_dir_misses.value.ui64++;
		return (DNOCACHE);
	}

	if (!dnlc_dir_enable) {
		return (DNOCACHE);
	}

	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		dcp->dc_actime = ddi_get_lbolt64();
		DNLC_DIR_HASH(name, hash, namlen);
		dep = dcp->dc_namehash[hash & dcp->dc_nhash_mask];
		while (dep != NULL) {
			if ((dep->de_hash == hash) &&
			    (namlen == dep->de_namelen) &&
			    bcmp(dep->de_name, name, namlen) == 0) {
				*handle = dep->de_handle;
				mutex_exit(&dcap->dca_lock);
				ncs.ncs_dir_hits.value.ui64++;
				return (DFOUND);
			}
			dep = dep->de_next;
		}
		if (dcp->dc_complete) {
			ret = DNOENT;
		} else {
			ret = DNOCACHE;
		}
		mutex_exit(&dcap->dca_lock);
		return (ret);
	} else {
		mutex_exit(&dcap->dca_lock);
		ncs.ncs_dir_misses.value.ui64++;
		return (DNOCACHE);
	}
}

/*
 * Start a new directory cache. An estimate of the number of
 * entries is provided as a quick check to ensure the directory
 * is cacheable.
 */
dcret_t
dnlc_dir_start(dcanchor_t *dcap, uint_t num_entries)
{
	dircache_t *dcp;

	if (!dnlc_dir_enable ||
	    (num_entries < dnlc_dir_min_size)) {
		return (DNOCACHE);
	}

	if (num_entries > dnlc_dir_max_size) {
		return (DTOOBIG);
	}

	mutex_enter(&dc_head.dch_lock);
	mutex_enter(&dcap->dca_lock);

	if (dcap->dca_dircache == DC_RET_LOW_MEM) {
		dcap->dca_dircache = NULL;
		mutex_exit(&dcap->dca_lock);
		mutex_exit(&dc_head.dch_lock);
		return (DNOMEM);
	}

	/*
	 * Check if there's currently a cache.
	 * This probably only occurs on a race.
	 */
	if (dcap->dca_dircache != NULL) {
		mutex_exit(&dcap->dca_lock);
		mutex_exit(&dc_head.dch_lock);
		return (DNOCACHE);
	}

	/*
	 * Allocate the dircache struct, entry and free space hash tables.
	 * These tables are initially just one entry but dynamically resize
	 * when entries and free space are added or removed.
	 */
	if ((dcp = kmem_zalloc(sizeof (dircache_t), KM_NOSLEEP)) == NULL) {
		goto error;
	}
	if ((dcp->dc_namehash = kmem_zalloc(sizeof (dcentry_t *),
	    KM_NOSLEEP)) == NULL) {
		goto error;
	}
	if ((dcp->dc_freehash = kmem_zalloc(sizeof (dcfree_t *),
	    KM_NOSLEEP)) == NULL) {
		goto error;
	}

	dcp->dc_anchor = dcap; /* set back pointer to anchor */
	dcap->dca_dircache = dcp;

	/* add into head of global chain */
	dcp->dc_next = dc_head.dch_next;
	dcp->dc_prev = (dircache_t *)&dc_head;
	dcp->dc_next->dc_prev = dcp;
	dc_head.dch_next = dcp;

	mutex_exit(&dcap->dca_lock);
	mutex_exit(&dc_head.dch_lock);
	ncs.ncs_cur_dirs.value.ui64++;
	ncs.ncs_dirs_cached.value.ui64++;
	return (DOK);
error:
	if (dcp != NULL) {
		if (dcp->dc_namehash) {
			kmem_free(dcp->dc_namehash, sizeof (dcentry_t *));
		}
		kmem_free(dcp, sizeof (dircache_t));
	}
	/*
	 * Must also kmem_free dcp->dc_freehash if more error cases are added
	 */
	mutex_exit(&dcap->dca_lock);
	mutex_exit(&dc_head.dch_lock);
	ncs.ncs_dir_start_nm.value.ui64++;
	return (DNOCACHE);
}

/*
 * Add a directory entry to a partial or complete directory cache.
 */
dcret_t
dnlc_dir_add_entry(dcanchor_t *dcap, const char *name, uint64_t handle)
{
	dircache_t *dcp;
	dcentry_t **hp, *dep;
	int hash;
	uint_t capacity;
	uchar_t namlen;

	/*
	 * Allocate the dcentry struct, including the variable
	 * size name. Note, the null terminator is not copied.
	 *
	 * We do this outside the lock to avoid possible deadlock if
	 * dnlc_dir_reclaim() is called as a result of memory shortage.
	 */
	DNLC_DIR_HASH(name, hash, namlen);
	dep = kmem_alloc(sizeof (dcentry_t) - 1 + namlen, KM_NOSLEEP);
	if (dep == NULL) {
#ifdef DEBUG
		/*
		 * The kmem allocator generates random failures for
		 * KM_NOSLEEP calls (see KMEM_RANDOM_ALLOCATION_FAILURE)
		 * So try again before we blow away a perfectly good cache.
		 * This is done not to cover an error but purely for
		 * performance running a debug kernel.
		 * This random error only occurs in debug mode.
		 */
		dep = kmem_alloc(sizeof (dcentry_t) - 1 + namlen, KM_NOSLEEP);
		if (dep != NULL)
			goto ok;
#endif
		ncs.ncs_dir_add_nm.value.ui64++;
		/*
		 * Free a directory cache. This may be the one we are
		 * called with.
		 */
		dnlc_dir_reclaim(NULL);
		dep = kmem_alloc(sizeof (dcentry_t) - 1 + namlen, KM_NOSLEEP);
		if (dep == NULL) {
			/*
			 * still no memory, better delete this cache
			 */
			mutex_enter(&dcap->dca_lock);
			dcp = (dircache_t *)dcap->dca_dircache;
			if (VALID_DIR_CACHE(dcp)) {
				dnlc_dir_abort(dcp);
				dcap->dca_dircache = DC_RET_LOW_MEM;
			}
			mutex_exit(&dcap->dca_lock);
			ncs.ncs_dir_addabort.value.ui64++;
			return (DNOCACHE);
		}
		/*
		 * fall through as if the 1st kmem_alloc had worked
		 */
	}
#ifdef DEBUG
ok:
#endif
	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		/*
		 * If the total number of entries goes above the max
		 * then free this cache
		 */
		if ((dcp->dc_num_entries + dcp->dc_num_free) >
		    dnlc_dir_max_size) {
			mutex_exit(&dcap->dca_lock);
			dnlc_dir_purge(dcap);
			kmem_free(dep, sizeof (dcentry_t) - 1 + namlen);
			ncs.ncs_dir_add_max.value.ui64++;
			return (DTOOBIG);
		}
		dcp->dc_num_entries++;
		capacity = (dcp->dc_nhash_mask + 1) << dnlc_dir_hash_size_shift;
		if (dcp->dc_num_entries >=
		    (capacity << dnlc_dir_hash_resize_shift)) {
			dnlc_dir_adjust_nhash(dcp);
		}
		hp = &dcp->dc_namehash[hash & dcp->dc_nhash_mask];

		/*
		 * Initialise and chain in new entry
		 */
		dep->de_handle = handle;
		dep->de_hash = hash;
		/*
		 * Note de_namelen is a uchar_t to conserve space
		 * and alignment padding. The max length of any
		 * pathname component is defined as MAXNAMELEN
		 * which is 256 (including the terminating null).
		 * So provided this doesn't change, we don't include the null,
		 * we always use bcmp to compare strings, and we don't
		 * start storing full names, then we are ok.
		 * The space savings is worth it.
		 */
		dep->de_namelen = namlen;
		bcopy(name, dep->de_name, namlen);
		dep->de_next = *hp;
		*hp = dep;
		dcp->dc_actime = ddi_get_lbolt64();
		mutex_exit(&dcap->dca_lock);
		ncs.ncs_dir_num_ents.value.ui64++;
		return (DOK);
	} else {
		mutex_exit(&dcap->dca_lock);
		kmem_free(dep, sizeof (dcentry_t) - 1 + namlen);
		return (DNOCACHE);
	}
}

/*
 * Add free space to a partial or complete directory cache.
 */
dcret_t
dnlc_dir_add_space(dcanchor_t *dcap, uint_t len, uint64_t handle)
{
	dircache_t *dcp;
	dcfree_t *dfp, **hp;
	uint_t capacity;

	/*
	 * We kmem_alloc outside the lock to avoid possible deadlock if
	 * dnlc_dir_reclaim() is called as a result of memory shortage.
	 */
	dfp = kmem_cache_alloc(dnlc_dir_space_cache, KM_NOSLEEP);
	if (dfp == NULL) {
#ifdef DEBUG
		/*
		 * The kmem allocator generates random failures for
		 * KM_NOSLEEP calls (see KMEM_RANDOM_ALLOCATION_FAILURE)
		 * So try again before we blow away a perfectly good cache.
		 * This random error only occurs in debug mode
		 */
		dfp = kmem_cache_alloc(dnlc_dir_space_cache, KM_NOSLEEP);
		if (dfp != NULL)
			goto ok;
#endif
		ncs.ncs_dir_add_nm.value.ui64++;
		/*
		 * Free a directory cache. This may be the one we are
		 * called with.
		 */
		dnlc_dir_reclaim(NULL);
		dfp = kmem_cache_alloc(dnlc_dir_space_cache, KM_NOSLEEP);
		if (dfp == NULL) {
			/*
			 * still no memory, better delete this cache
			 */
			mutex_enter(&dcap->dca_lock);
			dcp = (dircache_t *)dcap->dca_dircache;
			if (VALID_DIR_CACHE(dcp)) {
				dnlc_dir_abort(dcp);
				dcap->dca_dircache = DC_RET_LOW_MEM;
			}
			mutex_exit(&dcap->dca_lock);
			ncs.ncs_dir_addabort.value.ui64++;
			return (DNOCACHE);
		}
		/*
		 * fall through as if the 1st kmem_alloc had worked
		 */
	}

#ifdef DEBUG
ok:
#endif
	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		if ((dcp->dc_num_entries + dcp->dc_num_free) >
		    dnlc_dir_max_size) {
			mutex_exit(&dcap->dca_lock);
			dnlc_dir_purge(dcap);
			kmem_cache_free(dnlc_dir_space_cache, dfp);
			ncs.ncs_dir_add_max.value.ui64++;
			return (DTOOBIG);
		}
		dcp->dc_num_free++;
		capacity = (dcp->dc_fhash_mask + 1) << dnlc_dir_hash_size_shift;
		if (dcp->dc_num_free >=
		    (capacity << dnlc_dir_hash_resize_shift)) {
			dnlc_dir_adjust_fhash(dcp);
		}
		/*
		 * Initialise and chain a new entry
		 */
		dfp->df_handle = handle;
		dfp->df_len = len;
		dcp->dc_actime = ddi_get_lbolt64();
		hp = &(dcp->dc_freehash[DDFHASH(handle, dcp)]);
		dfp->df_next = *hp;
		*hp = dfp;
		mutex_exit(&dcap->dca_lock);
		ncs.ncs_dir_num_ents.value.ui64++;
		return (DOK);
	} else {
		mutex_exit(&dcap->dca_lock);
		kmem_cache_free(dnlc_dir_space_cache, dfp);
		return (DNOCACHE);
	}
}

/*
 * Mark a directory cache as complete.
 */
void
dnlc_dir_complete(dcanchor_t *dcap)
{
	dircache_t *dcp;

	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		dcp->dc_complete = B_TRUE;
	}
	mutex_exit(&dcap->dca_lock);
}

/*
 * Internal routine to delete a partial or full directory cache.
 * No additional locking needed.
 */
static void
dnlc_dir_abort(dircache_t *dcp)
{
	dcentry_t *dep, *nhp;
	dcfree_t *fep, *fhp;
	uint_t nhtsize = dcp->dc_nhash_mask + 1; /* name hash table size */
	uint_t fhtsize = dcp->dc_fhash_mask + 1; /* free hash table size */
	uint_t i;

	/*
	 * Free up the cached name entries and hash table
	 */
	for (i = 0; i < nhtsize; i++) { /* for each hash bucket */
		nhp = dcp->dc_namehash[i];
		while (nhp != NULL) { /* for each chained entry */
			dep = nhp->de_next;
			kmem_free(nhp, sizeof (dcentry_t) - 1 +
			    nhp->de_namelen);
			nhp = dep;
		}
	}
	kmem_free(dcp->dc_namehash, sizeof (dcentry_t *) * nhtsize);

	/*
	 * Free up the free space entries and hash table
	 */
	for (i = 0; i < fhtsize; i++) { /* for each hash bucket */
		fhp = dcp->dc_freehash[i];
		while (fhp != NULL) { /* for each chained entry */
			fep = fhp->df_next;
			kmem_cache_free(dnlc_dir_space_cache, fhp);
			fhp = fep;
		}
	}
	kmem_free(dcp->dc_freehash, sizeof (dcfree_t *) * fhtsize);

	/*
	 * Finally free the directory cache structure itself
	 */
	ncs.ncs_dir_num_ents.value.ui64 -= (dcp->dc_num_entries +
	    dcp->dc_num_free);
	kmem_free(dcp, sizeof (dircache_t));
	ncs.ncs_cur_dirs.value.ui64--;
}

/*
 * Remove a partial or complete directory cache
 */
void
dnlc_dir_purge(dcanchor_t *dcap)
{
	dircache_t *dcp;

	mutex_enter(&dc_head.dch_lock);
	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (!VALID_DIR_CACHE(dcp)) {
		mutex_exit(&dcap->dca_lock);
		mutex_exit(&dc_head.dch_lock);
		return;
	}
	dcap->dca_dircache = NULL;
	/*
	 * Unchain from global list
	 */
	dcp->dc_prev->dc_next = dcp->dc_next;
	dcp->dc_next->dc_prev = dcp->dc_prev;
	mutex_exit(&dcap->dca_lock);
	mutex_exit(&dc_head.dch_lock);
	dnlc_dir_abort(dcp);
}

/*
 * Remove an entry from a complete or partial directory cache.
 * If handlep is non-NULL, return the entry's handle through it.
 */
dcret_t
dnlc_dir_rem_entry(dcanchor_t *dcap, const char *name, uint64_t *handlep)
{
	dircache_t *dcp;
	dcentry_t **prevpp, *te;
	uint_t capacity;
	int hash;
	int ret;
	uchar_t namlen;

	if (!dnlc_dir_enable) {
		return (DNOCACHE);
	}

	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		dcp->dc_actime = ddi_get_lbolt64();
		if (dcp->dc_nhash_mask > 0) { /* ie not minimum */
			capacity = (dcp->dc_nhash_mask + 1) <<
			    dnlc_dir_hash_size_shift;
			if (dcp->dc_num_entries <=
			    (capacity >> dnlc_dir_hash_resize_shift)) {
				dnlc_dir_adjust_nhash(dcp);
			}
		}
		DNLC_DIR_HASH(name, hash, namlen);
		prevpp = &dcp->dc_namehash[hash & dcp->dc_nhash_mask];
		while (*prevpp != NULL) {
			if (((*prevpp)->de_hash == hash) &&
			    (namlen == (*prevpp)->de_namelen) &&
			    bcmp((*prevpp)->de_name, name, namlen) == 0) {
				if (handlep != NULL) {
					*handlep = (*prevpp)->de_handle;
				}
				te = *prevpp;
				*prevpp = (*prevpp)->de_next;
				kmem_free(te, sizeof (dcentry_t) - 1 +
				    te->de_namelen);

				/*
				 * If the total number of entries
				 * falls below half the minimum number
				 * of entries then free this cache.
				 */
				if (--dcp->dc_num_entries <
				    (dnlc_dir_min_size >> 1)) {
					mutex_exit(&dcap->dca_lock);
					dnlc_dir_purge(dcap);
				} else {
					mutex_exit(&dcap->dca_lock);
				}
				ncs.ncs_dir_num_ents.value.ui64--;
				return (DFOUND);
			}
			prevpp = &((*prevpp)->de_next);
		}
		if (dcp->dc_complete) {
			ncs.ncs_dir_reme_fai.value.ui64++;
			ret = DNOENT;
		} else {
			ret = DNOCACHE;
		}
		mutex_exit(&dcap->dca_lock);
		return (ret);
	} else {
		mutex_exit(&dcap->dca_lock);
		return (DNOCACHE);
	}
}

/*
 * Remove free space of at least the given length from a complete
 * or partial directory cache.
 */
dcret_t
dnlc_dir_rem_space_by_len(dcanchor_t *dcap, uint_t len, uint64_t *handlep)
{
	dircache_t *dcp;
	dcfree_t **prevpp, *tfp;
	uint_t fhtsize; /* free hash table size */
	uint_t i;
	uint_t capacity;
	int ret;

	if (!dnlc_dir_enable) {
		return (DNOCACHE);
	}

	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		dcp->dc_actime = ddi_get_lbolt64();
		if (dcp->dc_fhash_mask > 0) { /* ie not minimum */
			capacity = (dcp->dc_fhash_mask + 1) <<
			    dnlc_dir_hash_size_shift;
			if (dcp->dc_num_free <=
			    (capacity >> dnlc_dir_hash_resize_shift)) {
				dnlc_dir_adjust_fhash(dcp);
			}
		}
		/*
		 * Search for an entry of the appropriate size
		 * on a first fit basis.
		 */
		fhtsize = dcp->dc_fhash_mask + 1;
		for (i = 0; i < fhtsize; i++) { /* for each hash bucket */
			prevpp = &(dcp->dc_freehash[i]);
			while (*prevpp != NULL) {
				if ((*prevpp)->df_len >= len) {
					*handlep = (*prevpp)->df_handle;
					tfp = *prevpp;
					*prevpp = (*prevpp)->df_next;
					dcp->dc_num_free--;
					mutex_exit(&dcap->dca_lock);
					kmem_cache_free(dnlc_dir_space_cache,
					    tfp);
					ncs.ncs_dir_num_ents.value.ui64--;
					return (DFOUND);
				}
				prevpp = &((*prevpp)->df_next);
			}
		}
		if (dcp->dc_complete) {
			ret = DNOENT;
		} else {
			ret = DNOCACHE;
		}
		mutex_exit(&dcap->dca_lock);
		return (ret);
	} else {
		mutex_exit(&dcap->dca_lock);
		return (DNOCACHE);
	}
}
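
/*
 * Sketch of intended use (illustrative): when creating a new directory
 * entry, a filesystem can ask the cache for a big-enough free slot and
 * so avoid a linear scan of the directory:
 *
 *	if (dnlc_dir_rem_space_by_len(dcap, reclen, &handle) == DFOUND) {
 *		// handle locates the free slot; write the entry there,
 *		// then cache the new name under the same handle:
 *		(void) dnlc_dir_add_entry(dcap, nm, handle);
 *	}
 */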

/*
 * Remove free space with the given handle from a complete or partial
 * directory cache.
 */
dcret_t
dnlc_dir_rem_space_by_handle(dcanchor_t *dcap, uint64_t handle)
{
	dircache_t *dcp;
	dcfree_t **prevpp, *tfp;
	uint_t capacity;
	int ret;

	if (!dnlc_dir_enable) {
		return (DNOCACHE);
	}

	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		dcp->dc_actime = ddi_get_lbolt64();
		if (dcp->dc_fhash_mask > 0) { /* ie not minimum */
			capacity = (dcp->dc_fhash_mask + 1) <<
			    dnlc_dir_hash_size_shift;
			if (dcp->dc_num_free <=
			    (capacity >> dnlc_dir_hash_resize_shift)) {
				dnlc_dir_adjust_fhash(dcp);
			}
		}

		/*
		 * search for the exact entry
		 */
		prevpp = &(dcp->dc_freehash[DDFHASH(handle, dcp)]);
		while (*prevpp != NULL) {
			if ((*prevpp)->df_handle == handle) {
				tfp = *prevpp;
				*prevpp = (*prevpp)->df_next;
				dcp->dc_num_free--;
				mutex_exit(&dcap->dca_lock);
				kmem_cache_free(dnlc_dir_space_cache, tfp);
				ncs.ncs_dir_num_ents.value.ui64--;
				return (DFOUND);
			}
			prevpp = &((*prevpp)->df_next);
		}
		if (dcp->dc_complete) {
			ncs.ncs_dir_rems_fai.value.ui64++;
			ret = DNOENT;
		} else {
			ret = DNOCACHE;
		}
		mutex_exit(&dcap->dca_lock);
		return (ret);
	} else {
		mutex_exit(&dcap->dca_lock);
		return (DNOCACHE);
	}
}

/*
 * Update the handle of a directory cache entry.
 */
dcret_t
dnlc_dir_update(dcanchor_t *dcap, const char *name, uint64_t handle)
{
	dircache_t *dcp;
	dcentry_t *dep;
	int hash;
	int ret;
	uchar_t namlen;

	if (!dnlc_dir_enable) {
		return (DNOCACHE);
	}

	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		dcp->dc_actime = ddi_get_lbolt64();
		DNLC_DIR_HASH(name, hash, namlen);
		dep = dcp->dc_namehash[hash & dcp->dc_nhash_mask];
		while (dep != NULL) {
			if ((dep->de_hash == hash) &&
			    (namlen == dep->de_namelen) &&
			    bcmp(dep->de_name, name, namlen) == 0) {
				dep->de_handle = handle;
				mutex_exit(&dcap->dca_lock);
				return (DFOUND);
			}
			dep = dep->de_next;
		}
		if (dcp->dc_complete) {
			ncs.ncs_dir_upd_fail.value.ui64++;
			ret = DNOENT;
		} else {
			ret = DNOCACHE;
		}
		mutex_exit(&dcap->dca_lock);
		return (ret);
	} else {
		mutex_exit(&dcap->dca_lock);
		return (DNOCACHE);
	}
}

void
dnlc_dir_fini(dcanchor_t *dcap)
{
	dircache_t *dcp;

	mutex_enter(&dc_head.dch_lock);
	mutex_enter(&dcap->dca_lock);
	dcp = (dircache_t *)dcap->dca_dircache;
	if (VALID_DIR_CACHE(dcp)) {
		/*
		 * Unchain from global list
		 */
		ncs.ncs_dir_finipurg.value.ui64++;
		dcp->dc_prev->dc_next = dcp->dc_next;
		dcp->dc_next->dc_prev = dcp->dc_prev;
	} else {
		dcp = NULL;
	}
	dcap->dca_dircache = NULL;
	mutex_exit(&dcap->dca_lock);
	mutex_exit(&dc_head.dch_lock);
	mutex_destroy(&dcap->dca_lock);
	if (dcp) {
		dnlc_dir_abort(dcp);
	}
}

/*
 * Reclaim callback for dnlc directory caching.
 * Invoked by the kernel memory allocator when memory gets tight.
 * This is a pretty serious condition and can easily lead to system
 * hangs if not enough space is returned.
 *
 * Deciding which directory (or directories) to purge is tricky.
 * Purging everything is overkill, but purging just the oldest used
 * was found to lead to hangs. The largest cached directories use the
 * most memory, but take the most effort to rebuild, whereas the smaller
 * ones have little value and give back little space. So what to do?
 *
 * The current policy is to continue purging the oldest used directories
 * until at least dnlc_dir_min_reclaim directory entries have been purged.
 */
/*ARGSUSED*/
static void
dnlc_dir_reclaim(void *unused)
{
	dircache_t *dcp, *oldest;
	uint_t dirent_cnt = 0;

	mutex_enter(&dc_head.dch_lock);
	while (dirent_cnt < dnlc_dir_min_reclaim) {
		dcp = dc_head.dch_next;
		oldest = NULL;
		while (dcp != (dircache_t *)&dc_head) {
			if (oldest == NULL) {
				oldest = dcp;
			} else {
				if (dcp->dc_actime < oldest->dc_actime) {
					oldest = dcp;
				}
			}
			dcp = dcp->dc_next;
		}
		if (oldest == NULL) {
			/* nothing to delete */
			mutex_exit(&dc_head.dch_lock);
			return;
		}
		/*
		 * remove from directory chain and purge
		 */
		oldest->dc_prev->dc_next = oldest->dc_next;
		oldest->dc_next->dc_prev = oldest->dc_prev;
		mutex_enter(&oldest->dc_anchor->dca_lock);
		/*
		 * If this was the last entry then it must be too large.
		 * Mark it as such by saving a special dircache_t
		 * pointer (DC_RET_LOW_MEM) in the anchor. The error DNOMEM
		 * will be presented to the caller of dnlc_dir_start()
		 */
		if (oldest->dc_next == oldest->dc_prev) {
			oldest->dc_anchor->dca_dircache = DC_RET_LOW_MEM;
			ncs.ncs_dir_rec_last.value.ui64++;
		} else {
			oldest->dc_anchor->dca_dircache = NULL;
			ncs.ncs_dir_recl_any.value.ui64++;
		}
		mutex_exit(&oldest->dc_anchor->dca_lock);
		dirent_cnt += oldest->dc_num_entries;
		dnlc_dir_abort(oldest);
	}
	mutex_exit(&dc_head.dch_lock);
}

/*
 * Dynamically grow or shrink the size of the name hash table
 */
static void
dnlc_dir_adjust_nhash(dircache_t *dcp)
{
	dcentry_t **newhash, *dep, **nhp, *tep;
	uint_t newsize;
	uint_t oldsize;
	uint_t newsizemask;
	int i;

	/*
	 * Allocate new hash table
	 */
	newsize = dcp->dc_num_entries >> dnlc_dir_hash_size_shift;
	newhash = kmem_zalloc(sizeof (dcentry_t *) * newsize, KM_NOSLEEP);
	if (newhash == NULL) {
		/*
		 * System is short on memory; just return.
		 * Note, the old hash table is still usable.
		 * This return is unlikely to repeatedly occur, because
		 * either some other directory caches will be reclaimed
		 * due to memory shortage, thus freeing memory, or this
		 * directory cache will be reclaimed.
		 */
		return;
	}
	oldsize = dcp->dc_nhash_mask + 1;
	dcp->dc_nhash_mask = newsizemask = newsize - 1;

	/*
	 * Move entries from the old table to the new
	 */
	for (i = 0; i < oldsize; i++) { /* for each hash bucket */
		dep = dcp->dc_namehash[i];
		while (dep != NULL) { /* for each chained entry */
			tep = dep;
			dep = dep->de_next;
			nhp = &newhash[tep->de_hash & newsizemask];
			tep->de_next = *nhp;
			*nhp = tep;
		}
	}

	/*
	 * delete old hash table and set new one in place
	 */
	kmem_free(dcp->dc_namehash, sizeof (dcentry_t *) * oldsize);
	dcp->dc_namehash = newhash;
}

/*
 * Dynamically grow or shrink the size of the free space hash table
 */
static void
dnlc_dir_adjust_fhash(dircache_t *dcp)
{
	dcfree_t **newhash, *dfp, **nhp, *tfp;
	uint_t newsize;
	uint_t oldsize;
	int i;

	/*
	 * Allocate new hash table
	 */
	newsize = dcp->dc_num_free >> dnlc_dir_hash_size_shift;
	newhash = kmem_zalloc(sizeof (dcfree_t *) * newsize, KM_NOSLEEP);
	if (newhash == NULL) {
		/*
		 * System is short on memory; just return.
		 * Note, the old hash table is still usable.
		 * This return is unlikely to repeatedly occur, because
		 * either some other directory caches will be reclaimed
		 * due to memory shortage, thus freeing memory, or this
		 * directory cache will be reclaimed.
		 */
		return;
	}
	oldsize = dcp->dc_fhash_mask + 1;
	dcp->dc_fhash_mask = newsize - 1;

	/*
	 * Move entries from the old table to the new
	 */
	for (i = 0; i < oldsize; i++) { /* for each hash bucket */
		dfp = dcp->dc_freehash[i];
		while (dfp != NULL) { /* for each chained entry */
			tfp = dfp;
			dfp = dfp->df_next;
			nhp = &newhash[DDFHASH(tfp->df_handle, dcp)];
			tfp->df_next = *nhp;
			*nhp = tfp;
		}
	}

	/*
	 * delete old hash table and set new one in place
	 */
	kmem_free(dcp->dc_freehash, sizeof (dcfree_t *) * oldsize);
	dcp->dc_freehash = newhash;
}