xref: /illumos-gate/usr/src/uts/common/fs/smbsrv/smb_kutil.c (revision b62fa64bd1b93f15d05f01b9f01842071f059a30)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
25  * Copyright 2022-2023 RackTop Systems, Inc.
26  * Copyright 2023 Oxide Computer Company
27  */
28 
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/tzfile.h>
32 #include <sys/atomic.h>
33 #include <sys/time.h>
34 #include <sys/spl.h>
35 #include <sys/random.h>
36 #include <smbsrv/smb_kproto.h>
37 #include <smbsrv/smb_fsops.h>
38 #include <smbsrv/smbinfo.h>
39 #include <smbsrv/smb_xdr.h>
40 #include <smbsrv/smb_vops.h>
41 #include <smbsrv/smb_idmap.h>
42 
43 #include <sys/sid.h>
44 #include <sys/priv_names.h>
45 #include <sys/bitmap.h>
46 
/* Object cache for smb_dtor_t nodes used by the llist/lavl delete queues. */
static kmem_cache_t	*smb_dtor_cache = NULL;

static boolean_t smb_avl_hold(smb_avl_t *);
static void smb_avl_rele(smb_avl_t *);

/* Leap-second count for the time conversion code; initialized to zero. */
time_t tzh_leapcnt = 0;

struct tm
*smb_gmtime_r(time_t *clock, struct tm *result);

time_t
smb_timegm(struct tm *tm);

/* Local definition of struct tm used by smb_gmtime_r/smb_timegm. */
struct	tm {
	int	tm_sec;
	int	tm_min;
	int	tm_hour;
	int	tm_mday;
	int	tm_mon;
	int	tm_year;
	int	tm_wday;
	int	tm_yday;
	int	tm_isdst;
};

/* Days per month in a non-leap year, January first. */
static const int days_in_month[] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
75 
76 /*
77  * Given a UTF-8 string (our internal form everywhere)
78  * return either the Unicode (UTF-16) length in bytes,
79  * or the OEM length in bytes.  Which we return is
80  * determined by whether the client supports Unicode.
81  * This length does NOT include the null.
82  */
83 int
smb_ascii_or_unicode_strlen(struct smb_request * sr,char * str)84 smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
85 {
86 	if (sr->session->dialect >= SMB_VERS_2_BASE ||
87 	    (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
88 		return (smb_wcequiv_strlen(str));
89 	return (smb_sbequiv_strlen(str));
90 }
91 
92 /*
93  * Given a UTF-8 string (our internal form everywhere)
94  * return either the Unicode (UTF-16) length in bytes,
95  * or the OEM length in bytes.  Which we return is
96  * determined by whether the client supports Unicode.
97  * This length DOES include the null.
98  */
99 int
smb_ascii_or_unicode_strlen_null(struct smb_request * sr,char * str)100 smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
101 {
102 	if (sr->session->dialect >= SMB_VERS_2_BASE ||
103 	    (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
104 		return (smb_wcequiv_strlen(str) + 2);
105 	return (smb_sbequiv_strlen(str) + 1);
106 }
107 
108 int
smb_ascii_or_unicode_null_len(struct smb_request * sr)109 smb_ascii_or_unicode_null_len(struct smb_request *sr)
110 {
111 	if (sr->session->dialect >= SMB_VERS_2_BASE ||
112 	    (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
113 		return (2);
114 	return (1);
115 }
116 
/*
 * Convert old-style (DOS, LanMan) wildcard strings to NT style.
 * This should ONLY happen to patterns that come from old clients,
 * meaning dialect LANMAN2_1 etc. (dialect < NT_LM_0_12).
 *
 *	? becomes >
 *	* becomes < when the next character is .
 *	. becomes " when the next character is ? or * or end of pattern
 *
 * Note: modifies pattern in place.
 */
void
smb_convert_wildcards(char *pattern)
{
	char	*cp = pattern;

	while (*cp != '\0') {
		if (*cp == '?') {
			*cp = '>';
		} else if (*cp == '*') {
			if (cp[1] == '.')
				*cp = '<';
		} else if (*cp == '.') {
			char next = cp[1];

			if (next == '?' || next == '*' || next == '\0')
				*cp = '\"';
		}
		cp++;
	}
}
150 
151 /*
152  * smb_sattr_check
153  *
154  * Check file attributes against a search attribute (sattr) mask.
155  *
156  * Normal files, which includes READONLY and ARCHIVE, always pass
157  * this check.  If the DIRECTORY, HIDDEN or SYSTEM special attributes
158  * are set then they must appear in the search mask.  The special
159  * attributes are inclusive, i.e. all special attributes that appear
160  * in sattr must also appear in the file attributes for the check to
161  * pass.
162  *
163  * The following examples show how this works:
164  *
165  *		fileA:	READONLY
166  *		fileB:	0 (no attributes = normal file)
167  *		fileC:	READONLY, ARCHIVE
168  *		fileD:	HIDDEN
169  *		fileE:	READONLY, HIDDEN, SYSTEM
170  *		dirA:	DIRECTORY
171  *
172  * search attribute: 0
173  *		Returns: fileA, fileB and fileC.
174  * search attribute: HIDDEN
175  *		Returns: fileA, fileB, fileC and fileD.
176  * search attribute: SYSTEM
177  *		Returns: fileA, fileB and fileC.
178  * search attribute: DIRECTORY
179  *		Returns: fileA, fileB, fileC and dirA.
180  * search attribute: HIDDEN and SYSTEM
181  *		Returns: fileA, fileB, fileC, fileD and fileE.
182  *
183  * Returns true if the file and sattr match; otherwise, returns false.
184  */
185 boolean_t
smb_sattr_check(uint16_t dosattr,uint16_t sattr)186 smb_sattr_check(uint16_t dosattr, uint16_t sattr)
187 {
188 	if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
189 	    !(sattr & FILE_ATTRIBUTE_DIRECTORY))
190 		return (B_FALSE);
191 
192 	if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
193 	    !(sattr & FILE_ATTRIBUTE_HIDDEN))
194 		return (B_FALSE);
195 
196 	if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
197 	    !(sattr & FILE_ATTRIBUTE_SYSTEM))
198 		return (B_FALSE);
199 
200 	return (B_TRUE);
201 }
202 
/*
 * Return the boot time (seconds since the epoch) of the zone in
 * which this SMB server instance is running.
 */
time_t
smb_get_boottime(void)
{
	return (curzone->zone_boot_time);
}
208 
/*
 * smb_idpool_increment
 *
 * This function increments the ID pool by doubling the current size. This
 * function assumes the caller entered the mutex of the pool.
 *
 * Returns 0 on success; -1 if the pool is already at its maximum size
 * or the (non-sleeping) allocation of the larger bitmap fails.
 */
static int
smb_idpool_increment(
    smb_idpool_t	*pool)
{
	uint8_t		*new_pool;
	uint32_t	new_size;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	new_size = pool->id_size * 2;
	if (new_size <= SMB_IDPOOL_MAX_SIZE) {
		/* The pool is one bit per ID, hence size / 8 bytes. */
		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
		if (new_pool) {
			bzero(new_pool, new_size / 8);
			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
			kmem_free(pool->id_pool, pool->id_size / 8);
			pool->id_pool = new_pool;
			pool->id_free_counter += new_size - pool->id_size;
			pool->id_max_free_counter += new_size - pool->id_size;
			pool->id_size = new_size;
			pool->id_idx_msk = (new_size / 8) - 1;
			if (new_size >= SMB_IDPOOL_MAX_SIZE) {
				/* id -1 made unavailable */
				pool->id_pool[pool->id_idx_msk] = 0x80;
				pool->id_free_counter--;
				pool->id_max_free_counter--;
			}
			return (0);
		}
	}
	return (-1);
}
247 
/*
 * smb_idpool_constructor
 *
 * This function initializes the pool structure provided.
 * IDs 0 and 0xFFFF are never handed out (0 is reserved here; -1 is
 * reserved when the pool reaches its maximum size).  Always returns 0.
 */
int
smb_idpool_constructor(
    smb_idpool_t	*pool)
{

	ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);

	pool->id_size = SMB_IDPOOL_MIN_SIZE;
	pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
	pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	/* Start allocation at bit 1, i.e. the first usable ID. */
	pool->id_bit = 0x02;
	pool->id_bit_idx = 1;
	pool->id_idx = 0;
	pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
	    KM_SLEEP);
	bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
	/* -1 id made unavailable */
	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
	pool->id_magic = SMB_IDPOOL_MAGIC;
	return (0);
}
276 
/*
 * smb_idpool_destructor
 *
 * This function tears down and frees the resources associated with the
 * pool provided.  All IDs must have been freed back to the pool first
 * (asserted via the free counters).
 */
void
smb_idpool_destructor(
    smb_idpool_t	*pool)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
	/* Invalidate the magic so stale pointers trip the ASSERTs above. */
	pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
	mutex_destroy(&pool->id_mutex);
	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
}
293 
/*
 * smb_idpool_alloc
 *
 * This function allocates an ID from the pool provided.
 * Scans the bitmap from the position remembered after the previous
 * allocation, growing the pool if no IDs are free.  Returns 0 and
 * stores the ID through *id on success; -1 if the pool is exhausted
 * and cannot grow.
 */
int
smb_idpool_alloc(
    smb_idpool_t	*pool,
    uint16_t		*id)
{
	uint32_t	i;
	uint8_t		bit;
	uint8_t		bit_idx;
	uint8_t		byte;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	mutex_enter(&pool->id_mutex);
	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
		mutex_exit(&pool->id_mutex);
		return (-1);
	}

	/* At most id_size byte visits; a free bit is guaranteed to exist. */
	i = pool->id_size;
	while (i) {
		bit = pool->id_bit;
		bit_idx = pool->id_bit_idx;
		byte = pool->id_pool[pool->id_idx];
		while (bit) {
			if (byte & bit) {
				/* Bit set means ID in use; try the next. */
				bit = bit << 1;
				bit_idx++;
				continue;
			}
			pool->id_pool[pool->id_idx] |= bit;
			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
			pool->id_free_counter--;
			/*
			 * Leave position at next bit to allocate,
			 * so we don't keep re-using the last in an
			 * alloc/free/alloc/free sequence.  Doing
			 * that can confuse some SMB clients.
			 */
			if (bit & 0x80) {
				pool->id_bit = 1;
				pool->id_bit_idx = 0;
				pool->id_idx++;
				pool->id_idx &= pool->id_idx_msk;
			} else {
				pool->id_bit = (bit << 1);
				pool->id_bit_idx = bit_idx + 1;
				/* keep id_idx */
			}
			mutex_exit(&pool->id_mutex);
			return (0);
		}
		/* Byte exhausted; wrap to the start of the next byte. */
		pool->id_bit = 1;
		pool->id_bit_idx = 0;
		pool->id_idx++;
		pool->id_idx &= pool->id_idx_msk;
		--i;
	}
	/*
	 * This section of code shouldn't be reached. If there are IDs
	 * available and none could be found there's a problem.
	 */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
	return (-1);
}
364 
/*
 * smb_idpool_free
 *
 * This function frees the ID provided.  Freeing an ID that is not
 * currently allocated (or the reserved IDs 0 and 0xFFFF) is a bug
 * and trips an ASSERT on DEBUG kernels.
 */
void
smb_idpool_free(
    smb_idpool_t	*pool,
    uint16_t		id)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(id != 0);
	ASSERT(id != 0xFFFF);

	mutex_enter(&pool->id_mutex);
	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
		/* Clear the bit for this ID and account for it. */
		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
		pool->id_free_counter++;
		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
		mutex_exit(&pool->id_mutex);
		return;
	}
	/* Freeing a free ID. */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
}
391 
/*
 * smb_lavl_constructor
 *
 * This function initializes a locked avl: the rwlock protecting the
 * tree, the mutex protecting the delete queue, the AVL tree itself
 * (with the given comparator, node size and link offset), and the
 * deferred-deletion queue.
 */
void
smb_lavl_constructor(
    smb_lavl_t	*la,
    int (*compar) (const void *, const void *),
    size_t	size,
    size_t	offset)
{
	rw_init(&la->la_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&la->la_mutex, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&la->la_tree, compar, size, offset);
	list_create(&la->la_deleteq, sizeof (smb_dtor_t),
	    offsetof(smb_dtor_t, dt_lnd));
	la->la_wrop = 0;
	la->la_deleteq_count = 0;
	la->la_flushing = B_FALSE;
}
413 
/*
 * Flush the delete queue and destroy a locked avl.
 * The tree must be empty by the time the flush completes.
 */
void
smb_lavl_destructor(
    smb_lavl_t	*la)
{
	smb_lavl_flush(la);

	ASSERT(la->la_deleteq_count == 0);
	ASSERT0(avl_numnodes(&la->la_tree));

	rw_destroy(&la->la_lock);
	avl_destroy(&la->la_tree);
	list_destroy(&la->la_deleteq);
	mutex_destroy(&la->la_mutex);
}
431 
/*
 * smb_lavl_enter
 * Enter the lavl rwlock in the given mode (RW_READER/RW_WRITER).
 * Not a macro so dtrace smbsrv:* can see it.
 */
void
smb_lavl_enter(smb_lavl_t *la, krw_t mode)
{
	rw_enter(&la->la_lock, mode);
}
441 
/*
 * Post an object to the delete queue.  The delete queue will be processed
 * during smb_lavl_exit or lavl destruction.  Objects are often posted for
 * deletion during avl iteration (while the lavl is locked) but that is
 * not required, and an object can be posted at any time.
 * dtorproc is the destructor invoked on the object when the queue is
 * flushed (see smb_lavl_flush).
 */
void
smb_lavl_post(smb_lavl_t *la, void *object, smb_dtorproc_t dtorproc)
{
	smb_dtor_t	*dtor;

	ASSERT((object != NULL) && (dtorproc != NULL));

	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
	bzero(dtor, sizeof (smb_dtor_t));
	dtor->dt_magic = SMB_DTOR_MAGIC;
	dtor->dt_object = object;
	dtor->dt_proc = dtorproc;

	mutex_enter(&la->la_mutex);
	list_insert_tail(&la->la_deleteq, dtor);
	++la->la_deleteq_count;
	mutex_exit(&la->la_mutex);
}
466 
/*
 * Exit the lavl lock and process the delete queue.
 */
void
smb_lavl_exit(smb_lavl_t *la)
{
	rw_exit(&la->la_lock);
	smb_lavl_flush(la);
}
476 
/*
 * Flush the lavl delete queue.  The mutex is dropped across the destructor
 * call in case this leads to additional objects being posted to the delete
 * queue.  The la_flushing flag makes the flush single-threaded so a
 * destructor that re-enters this path does not recurse.
 */
void
smb_lavl_flush(smb_lavl_t *la)
{
	smb_dtor_t    *dtor;

	mutex_enter(&la->la_mutex);
	if (la->la_flushing) {
		/* Another thread (or a destructor) is already flushing. */
		mutex_exit(&la->la_mutex);
		return;
	}
	la->la_flushing = B_TRUE;

	dtor = list_head(&la->la_deleteq);
	while (dtor != NULL) {
		SMB_DTOR_VALID(dtor);
		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
		list_remove(&la->la_deleteq, dtor);
		--la->la_deleteq_count;
		mutex_exit(&la->la_mutex);

		/* Run the destructor with the mutex dropped. */
		dtor->dt_proc(dtor->dt_object);

		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
		kmem_cache_free(smb_dtor_cache, dtor);
		mutex_enter(&la->la_mutex);
		dtor = list_head(&la->la_deleteq);
	}
	la->la_flushing = B_FALSE;

	mutex_exit(&la->la_mutex);
}
513 
/*
 * smb_lavl_upgrade
 *
 * This function tries to upgrade the lock of the locked avl. It assumes the
 * lock has already been entered in RW_READER mode. It first tries using the
 * Solaris function rw_tryupgrade(). If that call fails the lock is released
 * and reentered in RW_WRITER mode. In that last case a window is opened during
 * which the contents of the avl may have changed. The return code indicates
 * whether or not the avl was modified when the lock was exited:
 * 0 means the upgrade happened in place (no window), non-zero means
 * the tree may have changed and the caller should re-validate.
 */
int smb_lavl_upgrade(
    smb_lavl_t *la)
{
	uint64_t	wrop;

	if (rw_tryupgrade(&la->la_lock) != 0) {
		return (0);
	}
	/* Snapshot the write-op generation before dropping the lock. */
	wrop = la->la_wrop;
	rw_exit(&la->la_lock);
	rw_enter(&la->la_lock, RW_WRITER);
	return (wrop != la->la_wrop);
}
537 
/*
 * smb_lavl_insert
 *
 * This function inserts the object passed into the tree
 * at the position determined by the AVL comparator.
 * Caller must hold the lavl lock as writer.
 */
void
smb_lavl_insert(
    smb_lavl_t	*la,
    void	*obj)
{
	avl_add(&la->la_tree, obj);
	++la->la_wrop;
}
552 
/*
 * smb_lavl_remove
 *
 * This function removes the object passed from the lavl. This function
 * assumes the lock of the lavl has already been entered (as writer).
 */
void
smb_lavl_remove(
    smb_lavl_t	*la,
    void	*obj)
{
	avl_remove(&la->la_tree, obj);
	++la->la_wrop;
}
567 
/*
 * smb_lavl_get_count
 *
 * This function returns the number of elements in the specified avl.
 */
uint32_t
smb_lavl_get_count(
    smb_lavl_t *la)
{
	return ((uint32_t)avl_numnodes(&la->la_tree));
}
579 
/*
 * Initialize the llist delete queue object cache.
 * Idempotent: safe to call more than once.
 */
void
smb_llist_init(void)
{
	if (smb_dtor_cache != NULL)
		return;

	smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
	    sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
}
592 
/*
 * Destroy the llist delete queue object cache.
 * Idempotent counterpart of smb_llist_init().
 */
void
smb_llist_fini(void)
{
	if (smb_dtor_cache != NULL) {
		kmem_cache_destroy(smb_dtor_cache);
		smb_dtor_cache = NULL;
	}
}
604 
/*
 * smb_llist_constructor
 *
 * This function initializes a locked list: the rwlock protecting the
 * list, the mutex protecting the delete queue, the list itself (nodes
 * of the given size with the link at the given offset), and the
 * deferred-deletion queue.
 */
void
smb_llist_constructor(
    smb_llist_t	*ll,
    size_t	size,
    size_t	offset)
{
	rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
	list_create(&ll->ll_list, size, offset);
	list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
	    offsetof(smb_dtor_t, dt_lnd));
	ll->ll_count = 0;
	ll->ll_wrop = 0;
	ll->ll_deleteq_count = 0;
	ll->ll_flushing = B_FALSE;
}
626 
/*
 * Flush the delete queue and destroy a locked list.
 * The list must be empty by the time the flush completes.
 */
void
smb_llist_destructor(
    smb_llist_t	*ll)
{
	smb_llist_flush(ll);

	ASSERT(ll->ll_count == 0);
	ASSERT(ll->ll_deleteq_count == 0);

	rw_destroy(&ll->ll_lock);
	list_destroy(&ll->ll_list);
	list_destroy(&ll->ll_deleteq);
	mutex_destroy(&ll->ll_mutex);
}
644 
/*
 * smb_llist_enter
 * Enter the llist rwlock in the given mode (RW_READER/RW_WRITER).
 * Not a macro so dtrace smbsrv:* can see it.
 */
void
smb_llist_enter(smb_llist_t *ll, krw_t mode)
{
	rw_enter(&ll->ll_lock, mode);
}
654 
/*
 * Post an object to the delete queue.  The delete queue will be processed
 * during list exit or list destruction.  Objects are often posted for
 * deletion during list iteration (while the list is locked) but that is
 * not required, and an object can be posted at any time.
 * dtorproc is the destructor invoked on the object when the queue is
 * flushed (see smb_llist_flush).
 */
void
smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
{
	smb_dtor_t	*dtor;

	ASSERT((object != NULL) && (dtorproc != NULL));

	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
	bzero(dtor, sizeof (smb_dtor_t));
	dtor->dt_magic = SMB_DTOR_MAGIC;
	dtor->dt_object = object;
	dtor->dt_proc = dtorproc;

	mutex_enter(&ll->ll_mutex);
	list_insert_tail(&ll->ll_deleteq, dtor);
	++ll->ll_deleteq_count;
	mutex_exit(&ll->ll_mutex);
}
679 
/*
 * Exit the list lock and process the delete queue.
 */
void
smb_llist_exit(smb_llist_t *ll)
{
	rw_exit(&ll->ll_lock);
	smb_llist_flush(ll);
}
689 
/*
 * Flush the list delete queue.  The mutex is dropped across the destructor
 * call in case this leads to additional objects being posted to the delete
 * queue.  The ll_flushing flag makes the flush single-threaded so a
 * destructor that re-enters this path does not recurse.
 */
void
smb_llist_flush(smb_llist_t *ll)
{
	smb_dtor_t    *dtor;

	mutex_enter(&ll->ll_mutex);
	if (ll->ll_flushing) {
		/* Another thread (or a destructor) is already flushing. */
		mutex_exit(&ll->ll_mutex);
		return;
	}
	ll->ll_flushing = B_TRUE;

	dtor = list_head(&ll->ll_deleteq);
	while (dtor != NULL) {
		SMB_DTOR_VALID(dtor);
		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
		list_remove(&ll->ll_deleteq, dtor);
		--ll->ll_deleteq_count;
		mutex_exit(&ll->ll_mutex);

		/* Run the destructor with the mutex dropped. */
		dtor->dt_proc(dtor->dt_object);

		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
		kmem_cache_free(smb_dtor_cache, dtor);
		mutex_enter(&ll->ll_mutex);
		dtor = list_head(&ll->ll_deleteq);
	}
	ll->ll_flushing = B_FALSE;

	mutex_exit(&ll->ll_mutex);
}
726 
/*
 * smb_llist_upgrade
 *
 * This function tries to upgrade the lock of the locked list. It assumes the
 * lock has already been entered in RW_READER mode. It first tries using the
 * Solaris function rw_tryupgrade(). If that call fails the lock is released
 * and reentered in RW_WRITER mode. In that last case a window is opened during
 * which the contents of the list may have changed. The return code indicates
 * whether or not the list was modified when the lock was exited:
 * 0 means the upgrade happened in place (no window), non-zero means
 * the list may have changed and the caller should re-validate.
 */
int smb_llist_upgrade(
    smb_llist_t *ll)
{
	uint64_t	wrop;

	if (rw_tryupgrade(&ll->ll_lock) != 0) {
		return (0);
	}
	/* Snapshot the write-op generation before dropping the lock. */
	wrop = ll->ll_wrop;
	rw_exit(&ll->ll_lock);
	rw_enter(&ll->ll_lock, RW_WRITER);
	return (wrop != ll->ll_wrop);
}
750 
/*
 * smb_llist_insert_head
 *
 * This function inserts the object passed at the beginning of the list. This
 * function assumes the lock of the list has already been entered (as writer).
 */
void
smb_llist_insert_head(
    smb_llist_t	*ll,
    void	*obj)
{
	list_insert_head(&ll->ll_list, obj);
	++ll->ll_wrop;
	++ll->ll_count;
}
766 
/*
 * smb_llist_insert_tail
 *
 * This function appends the object passed to the end of the list. This
 * function assumes the lock of the list has already been entered (as writer).
 */
void
smb_llist_insert_tail(
    smb_llist_t	*ll,
    void	*obj)
{
	list_insert_tail(&ll->ll_list, obj);
	++ll->ll_wrop;
	++ll->ll_count;
}
783 
/*
 * smb_llist_remove
 *
 * This function removes the object passed from the list. This function assumes
 * the lock of the list has already been entered (as writer).
 */
void
smb_llist_remove(
    smb_llist_t	*ll,
    void	*obj)
{
	list_remove(&ll->ll_list, obj);
	++ll->ll_wrop;
	--ll->ll_count;
}
799 
/*
 * smb_llist_get_count
 *
 * This function returns the number of elements in the specified list.
 */
uint32_t
smb_llist_get_count(
    smb_llist_t *ll)
{
	return (ll->ll_count);
}
811 
/*
 * smb_slist_constructor
 *
 * Synchronized list constructor: initializes the mutex, the condition
 * variable used by smb_slist_wait_for_empty(), and the list itself
 * (nodes of the given size with the link at the given offset).
 */
void
smb_slist_constructor(
    smb_slist_t	*sl,
    size_t	size,
    size_t	offset)
{
	mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
	list_create(&sl->sl_list, size, offset);
	sl->sl_count = 0;
	sl->sl_waiting = B_FALSE;
}
829 
/*
 * smb_slist_destructor
 *
 * Synchronized list destructor.  The list must be empty.
 */
void
smb_slist_destructor(
    smb_slist_t	*sl)
{
	VERIFY(sl->sl_count == 0);

	mutex_destroy(&sl->sl_mutex);
	cv_destroy(&sl->sl_cv);
	list_destroy(&sl->sl_list);
}
845 
/*
 * smb_slist_enter
 * Enter the slist mutex.
 * Not a macro so dtrace smbsrv:* can see it.
 */
void
smb_slist_enter(smb_slist_t *sl)
{
	mutex_enter(&(sl)->sl_mutex);
}
855 
/*
 * smb_slist_insert_head
 *
 * This function inserts the object passed at the beginning of the list,
 * taking and dropping the list mutex itself.
 */
void
smb_slist_insert_head(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_insert_head(&sl->sl_list, obj);
	++sl->sl_count;
	mutex_exit(&sl->sl_mutex);
}
871 
/*
 * smb_slist_insert_tail
 *
 * This function appends the object passed to the list,
 * taking and dropping the list mutex itself.
 */
void
smb_slist_insert_tail(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_insert_tail(&sl->sl_list, obj);
	++sl->sl_count;
	mutex_exit(&sl->sl_mutex);
}
887 
/*
 * smb_slist_remove
 *
 * This function removes the object passed by the caller from the list,
 * taking and dropping the list mutex itself.  If the list becomes empty
 * and a thread is waiting in smb_slist_wait_for_empty(), wake it.
 */
void
smb_slist_remove(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_remove(&sl->sl_list, obj);
	if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}
906 
/*
 * smb_slist_move_tail
 *
 * This function transfers all the contents of the synchronized list to the
 * list_t provided. It returns the number of objects transferred.
 * Waiters (smb_slist_wait_for_empty) are woken since the list empties.
 */
uint32_t
smb_slist_move_tail(
    list_t	*lst,
    smb_slist_t	*sl)
{
	uint32_t	rv;

	mutex_enter(&sl->sl_mutex);
	rv = sl->sl_count;
	if (sl->sl_count) {
		list_move_tail(lst, &sl->sl_list);
		sl->sl_count = 0;
		if (sl->sl_waiting) {
			sl->sl_waiting = B_FALSE;
			cv_broadcast(&sl->sl_cv);
		}
	}
	mutex_exit(&sl->sl_mutex);
	return (rv);
}
933 
/*
 * smb_slist_obj_move
 *
 * This function moves an object from one list to the end of the other list. It
 * assumes the mutex of each list has been entered.  Both lists must hold
 * nodes of the same layout (asserted below).
 */
void
smb_slist_obj_move(
    smb_slist_t	*dst,
    smb_slist_t	*src,
    void	*obj)
{
	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);

	list_remove(&src->sl_list, obj);
	list_insert_tail(&dst->sl_list, obj);
	dst->sl_count++;
	src->sl_count--;
	/* Wake a waiter if the source list just drained. */
	if ((src->sl_count == 0) && (src->sl_waiting)) {
		src->sl_waiting = B_FALSE;
		cv_broadcast(&src->sl_cv);
	}
}
958 
/*
 * smb_slist_wait_for_empty
 *
 * This function waits (blocking on sl_cv) until the list is empty.
 */
void
smb_slist_wait_for_empty(
    smb_slist_t	*sl)
{
	mutex_enter(&sl->sl_mutex);
	while (sl->sl_count) {
		sl->sl_waiting = B_TRUE;
		cv_wait(&sl->sl_cv, &sl->sl_mutex);
	}
	mutex_exit(&sl->sl_mutex);
}
975 
/*
 * smb_slist_exit
 *
 * This function exits the mutex of the list and signals the condition
 * variable if the list is empty and a waiter is present.
 */
void
smb_slist_exit(smb_slist_t *sl)
{
	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}
991 
992 /* smb_thread_... moved to smb_thread.c */
993 
/*
 * smb_rwx_init
 *
 * Initialize an smb_rwx: a readers/writer lock paired with a condition
 * variable and the mutex that protects it (see smb_rwx_cvwait).
 */
void
smb_rwx_init(
    smb_rwx_t	*rwx)
{
	bzero(rwx, sizeof (smb_rwx_t));
	cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
}
1006 
/*
 * smb_rwx_destroy
 *
 * Tear down all three synchronization primitives of an smb_rwx.
 */
void
smb_rwx_destroy(
    smb_rwx_t	*rwx)
{
	mutex_destroy(&rwx->rwx_mutex);
	cv_destroy(&rwx->rwx_cv);
	rw_destroy(&rwx->rwx_lock);
}
1018 
/*
 * smb_rwx_rwenter
 *
 * Enter the rwx lock in the given mode (RW_READER/RW_WRITER).
 */
void
smb_rwx_rwenter(smb_rwx_t *rwx, krw_t mode)
{
	rw_enter(&rwx->rwx_lock, mode);
}
1027 
/*
 * smb_rwx_rwexit
 *
 * Exit the rwx lock.
 */
void
smb_rwx_rwexit(
    smb_rwx_t	*rwx)
{
	rw_exit(&rwx->rwx_lock);
}
1037 
1038 
/*
 * smb_rwx_cvwait
 *
 * Wait on rwx->rw_cv, dropping the rw lock and retake after wakeup.
 * Assumes the smb_rwx lock was entered in RW_READER or RW_WRITER
 * mode. It will:
 *
 *	1) release the lock and save its current mode.
 *	2) wait until the condition variable is signaled.
 *	3) re-acquire the lock in the mode saved in (1).
 *
 * Lock order: rwlock, mutex
 *
 * timeout is in clock ticks; -1 means wait forever.  Returns the
 * cv_reltimedwait() result for timed waits (-1 on timeout, >0 when
 * signaled), or 1 for an untimed wait.
 */
int
smb_rwx_cvwait(
    smb_rwx_t	*rwx,
    clock_t	timeout)
{
	krw_t	mode;
	int	rc = 1;

	/* Remember how the caller holds the lock so we can retake it. */
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mode = RW_WRITER;
	} else {
		ASSERT(rw_read_held(&rwx->rwx_lock));
		mode = RW_READER;
	}

	/* Take the mutex before dropping the rwlock (lock order above). */
	mutex_enter(&rwx->rwx_mutex);
	rw_exit(&rwx->rwx_lock);

	rwx->rwx_waiting = B_TRUE;
	if (timeout == -1) {
		cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
	} else {
		rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
		    timeout, TR_CLOCK_TICK);
	}
	mutex_exit(&rwx->rwx_mutex);

	rw_enter(&rwx->rwx_lock, mode);
	return (rc);
}
1083 
/*
 * smb_rwx_cvbcast
 *
 * Wake up threads waiting on rx_cv
 * The rw lock may or may not be held.
 * The mutex MUST NOT be held.
 */
void
smb_rwx_cvbcast(smb_rwx_t *rwx)
{
	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		rwx->rwx_waiting = B_FALSE;
		cv_broadcast(&rwx->rwx_cv);
	}
	mutex_exit(&rwx->rwx_mutex);
}
1101 
1102 /* smb_idmap_... moved to smb_idmap.c */
1103 
/*
 * Convert a unix timestruc_t to an NT time (100ns units since 1601).
 * The special unix time 0:0 converts to NT time 0 (not NT_TIME_BIAS),
 * matching the reverse mapping in smb_time_nt_to_unix().
 */
uint64_t
smb_time_unix_to_nt(timestruc_t *unix_time)
{
	uint64_t nt_time;

	if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
		return (0);

	nt_time = unix_time->tv_sec;
	nt_time *= 10000000;  /* seconds to 100ns */
	nt_time += unix_time->tv_nsec / 100;
	return (nt_time + NT_TIME_BIAS);
}
1117 
/* Sentinel unix times representing the special NT times -1 and -2. */
const timestruc_t smb_nttime_m1 = { -1, -1 }; /* minus 1 */
const timestruc_t smb_nttime_m2 = { -1, -2 }; /* minus 2 */

/*
 * Convert an NT time (100ns units since 1601) to a unix timestruc_t.
 * The NT values 0, -1 and -2 are preserved as the sentinels above.
 */
void
smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
{
	static const timestruc_t tzero = { 0, 0 };
	uint32_t seconds;

	ASSERT(unix_time);

	/*
	 * NT time values (0, -1, -2) get special treatment in SMB.
	 * See notes above smb_node_setattr() for details.
	 */
	if (nt_time == 0) {
		*unix_time = tzero;
		return;
	}
	if ((int64_t)nt_time == -1) {
		*unix_time = smb_nttime_m1;
		return;
	}
	if ((int64_t)nt_time == -2) {
		*unix_time = smb_nttime_m2;
		return;
	}

	/*
	 * Can't represent times less than or equal NT_TIME_BIAS,
	 * so convert them to the oldest date we can store.
	 * Note that time zero is "special" being converted
	 * both directions as 0:0 (unix-to-nt, nt-to-unix).
	 */
	if (nt_time <= NT_TIME_BIAS) {
		unix_time->tv_sec = 0;
		unix_time->tv_nsec = 100;
		return;
	}

	nt_time -= NT_TIME_BIAS;
	seconds = nt_time / 10000000;
	unix_time->tv_sec = seconds;
	unix_time->tv_nsec = (nt_time  % 10000000) * 100;
}
1163 
1164 /*
1165  * smb_time_gmt_to_local, smb_time_local_to_gmt
1166  *
1167  * Apply the gmt offset to convert between local time and gmt
1168  */
1169 int32_t
smb_time_gmt_to_local(smb_request_t * sr,int32_t gmt)1170 smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt)
1171 {
1172 	if ((gmt == 0) || (gmt == -1))
1173 		return (0);
1174 
1175 	return (gmt - sr->sr_gmtoff);
1176 }
1177 
1178 int32_t
smb_time_local_to_gmt(smb_request_t * sr,int32_t local)1179 smb_time_local_to_gmt(smb_request_t *sr, int32_t local)
1180 {
1181 	if ((local == 0) || (local == -1))
1182 		return (0);
1183 
1184 	return (local + sr->sr_gmtoff);
1185 }
1186 
1187 
1188 /*
1189  * smb_time_dos_to_unix
1190  *
1191  * Convert SMB_DATE & SMB_TIME values to a unix timestamp.
1192  *
1193  * A date/time field of 0 means that that server file system
1194  * assigned value need not be changed. The behaviour when the
1195  * date/time field is set to -1 is not documented but is
1196  * generally treated like 0.
1197  * If date or time is 0 or -1 the unix time is returned as 0
1198  * so that the caller can identify and handle this special case.
1199  */
1200 int32_t
smb_time_dos_to_unix(int16_t date,int16_t time)1201 smb_time_dos_to_unix(int16_t date, int16_t time)
1202 {
1203 	struct tm	atm;
1204 
1205 	if (((date == 0) || (time == 0)) ||
1206 	    ((date == -1) || (time == -1))) {
1207 		return (0);
1208 	}
1209 
1210 	atm.tm_year = ((date >>  9) & 0x3F) + 80;
1211 	atm.tm_mon  = ((date >>  5) & 0x0F) - 1;
1212 	atm.tm_mday = ((date >>  0) & 0x1F);
1213 	atm.tm_hour = ((time >> 11) & 0x1F);
1214 	atm.tm_min  = ((time >>  5) & 0x3F);
1215 	atm.tm_sec  = ((time >>  0) & 0x1F) << 1;
1216 
1217 	return (smb_timegm(&atm));
1218 }
1219 
1220 void
smb_time_unix_to_dos(int32_t ux_time,int16_t * date_p,int16_t * time_p)1221 smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p)
1222 {
1223 	struct tm	atm;
1224 	int		i;
1225 	time_t		tmp_time;
1226 
1227 	if (ux_time == 0) {
1228 		*date_p = 0;
1229 		*time_p = 0;
1230 		return;
1231 	}
1232 
1233 	tmp_time = (time_t)ux_time;
1234 	(void) smb_gmtime_r(&tmp_time, &atm);
1235 
1236 	if (date_p) {
1237 		i = 0;
1238 		i += atm.tm_year - 80;
1239 		i <<= 4;
1240 		i += atm.tm_mon + 1;
1241 		i <<= 5;
1242 		i += atm.tm_mday;
1243 
1244 		*date_p = (short)i;
1245 	}
1246 	if (time_p) {
1247 		i = 0;
1248 		i += atm.tm_hour;
1249 		i <<= 6;
1250 		i += atm.tm_min;
1251 		i <<= 5;
1252 		i += atm.tm_sec >> 1;
1253 
1254 		*time_p = (short)i;
1255 	}
1256 }
1257 
1258 
/*
 * smb_gmtime_r
 *
 * Thread-safe version of smb_gmtime. Returns a null pointer if either
 * input parameter is a null pointer. Otherwise returns a pointer
 * to result.
 *
 * Day of the week calculation: the Epoch was a thursday.
 *
 * There are no timezone corrections so tm_isdst and tm_gmtoff are
 * always zero, and the zone is always WET.
 */
struct tm *
smb_gmtime_r(time_t *clock, struct tm *result)
{
	time_t tsec;
	int year;
	int month;
	int sec_per_month;

	if (clock == 0 || result == 0)
		return (0);

	bzero(result, sizeof (struct tm));
	tsec = *clock;
	/* Back out accumulated leap seconds (see tzh_leapcnt). */
	tsec -= tzh_leapcnt;

	/* Epoch day 0 was a Thursday; count days modulo the week. */
	result->tm_wday = tsec / SECSPERDAY;
	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;

	/* Strip off whole years, tracking leap years, to find the year. */
	year = EPOCH_YEAR;
	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
	    (SECSPERDAY * DAYSPERNYEAR))) {
		if (isleap(year))
			tsec -= SECSPERDAY * DAYSPERLYEAR;
		else
			tsec -= SECSPERDAY * DAYSPERNYEAR;

		++year;
	}

	result->tm_year = year - TM_YEAR_BASE;
	result->tm_yday = tsec / SECSPERDAY;

	/* Strip off whole months (Feb gets one extra day in leap years). */
	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
		sec_per_month = days_in_month[month] * SECSPERDAY;

		if (month == TM_FEBRUARY && isleap(year))
			sec_per_month += SECSPERDAY;

		if (tsec < sec_per_month)
			break;

		tsec -= sec_per_month;
	}

	/* Remainder is day-of-month plus time-of-day. */
	result->tm_mon = month;
	result->tm_mday = (tsec / SECSPERDAY) + 1;
	tsec %= SECSPERDAY;
	result->tm_sec = tsec % 60;
	tsec /= 60;
	result->tm_min = tsec % 60;
	tsec /= 60;
	result->tm_hour = (int)tsec;

	return (result);
}
1326 
1327 
/*
 * smb_timegm
 *
 * Converts the broken-down time in tm to a time value, i.e. the number
 * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
 * not a POSIX or ANSI function. Per the man page, the input values of
 * tm_wday and tm_yday are ignored and, as the input data is assumed to
 * represent GMT, we force tm_isdst and tm_gmtoff to 0.
 *
 * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
 * and tm_yday, and bring the other fields within normal range. I don't
 * think this is really how it should be done but it's convenient for
 * now.
 */
time_t
smb_timegm(struct tm *tm)
{
	time_t tsec;
	int dd;
	int mm;
	int yy;
	int year;

	if (tm == 0)
		return (-1);

	year = tm->tm_year + TM_YEAR_BASE;
	/* Start from the accumulated leap-second count (tzh_leapcnt). */
	tsec = tzh_leapcnt;

	/* Whole years from the Epoch up to (not including) 'year'. */
	for (yy = EPOCH_YEAR; yy < year; ++yy) {
		if (isleap(yy))
			tsec += SECSPERDAY * DAYSPERLYEAR;
		else
			tsec += SECSPERDAY * DAYSPERNYEAR;
	}

	/* Whole months of the target year before tm_mon. */
	for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
		dd = days_in_month[mm] * SECSPERDAY;

		if (mm == TM_FEBRUARY && isleap(year))
			dd += SECSPERDAY;

		tsec += dd;
	}

	/* Days into the month, then the time of day. */
	tsec += (tm->tm_mday - 1) * SECSPERDAY;
	tsec += tm->tm_sec;
	tsec += tm->tm_min * SECSPERMIN;
	tsec += tm->tm_hour * SECSPERHOUR;

	/* Normalize the caller's tm (fills tm_wday/tm_yday, forces GMT). */
	tm->tm_isdst = 0;
	(void) smb_gmtime_r(&tsec, tm);
	return (tsec);
}
1382 
/*
 * smb_pad_align
 *
 * Returns the number of bytes required to pad 'offset' up to the next
 * multiple of 'align' (zero when the offset is already aligned).
 */
uint32_t
smb_pad_align(uint32_t offset, uint32_t align)
{
	uint32_t rem = offset % align;

	return ((rem == 0) ? 0 : align - rem);
}
1399 
/*
 * smb_panic
 *
 * Logs the file name, function name and line number passed in and panics the
 * system.
 */
void
smb_panic(char *file, const char *func, int line)
{
	/* CE_PANIC both logs the message and induces a system panic. */
	cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
}
1411 
1412 /*
1413  * Creates an AVL tree and initializes the given smb_avl_t
1414  * structure using the passed args
1415  */
1416 void
smb_avl_create(smb_avl_t * avl,size_t size,size_t offset,const smb_avl_nops_t * ops)1417 smb_avl_create(smb_avl_t *avl, size_t size, size_t offset,
1418     const smb_avl_nops_t *ops)
1419 {
1420 	ASSERT(avl);
1421 	ASSERT(ops);
1422 
1423 	rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL);
1424 	mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL);
1425 
1426 	avl->avl_nops = ops;
1427 	avl->avl_state = SMB_AVL_STATE_READY;
1428 	avl->avl_refcnt = 0;
1429 	(void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence,
1430 	    sizeof (uint32_t));
1431 
1432 	avl_create(&avl->avl_tree, ops->avln_cmp, size, offset);
1433 }
1434 
/*
 * Destroys the specified AVL tree.
 * It waits for all the in-flight operations to finish
 * before destroying the AVL.
 */
void
smb_avl_destroy(smb_avl_t *avl)
{
	void *cookie = NULL;
	void *node;

	ASSERT(avl);

	mutex_enter(&avl->avl_mutex);
	if (avl->avl_state != SMB_AVL_STATE_READY) {
		/* Already being (or already been) destroyed. */
		mutex_exit(&avl->avl_mutex);
		return;
	}

	/* Refuse new holds from here on; see smb_avl_hold(). */
	avl->avl_state = SMB_AVL_STATE_DESTROYING;

	/* Wait out in-flight operations; smb_avl_rele() signals avl_cv. */
	while (avl->avl_refcnt > 0)
		(void) cv_wait(&avl->avl_cv, &avl->avl_mutex);
	mutex_exit(&avl->avl_mutex);

	/* Tear down the tree, destroying every remaining node. */
	rw_enter(&avl->avl_lock, RW_WRITER);
	while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL)
		avl->avl_nops->avln_destroy(node);

	avl_destroy(&avl->avl_tree);
	rw_exit(&avl->avl_lock);

	rw_destroy(&avl->avl_lock);

	mutex_destroy(&avl->avl_mutex);
	bzero(avl, sizeof (smb_avl_t));
}
1472 
1473 /*
1474  * Adds the given item to the AVL if it's
1475  * not already there.
1476  *
1477  * Returns:
1478  *
1479  *	ENOTACTIVE	AVL is not in READY state
1480  *	EEXIST		The item is already in AVL
1481  */
1482 int
smb_avl_add(smb_avl_t * avl,void * item)1483 smb_avl_add(smb_avl_t *avl, void *item)
1484 {
1485 	avl_index_t where;
1486 
1487 	ASSERT(avl);
1488 	ASSERT(item);
1489 
1490 	if (!smb_avl_hold(avl))
1491 		return (ENOTACTIVE);
1492 
1493 	rw_enter(&avl->avl_lock, RW_WRITER);
1494 	if (avl_find(&avl->avl_tree, item, &where) != NULL) {
1495 		rw_exit(&avl->avl_lock);
1496 		smb_avl_rele(avl);
1497 		return (EEXIST);
1498 	}
1499 
1500 	avl_insert(&avl->avl_tree, item, where);
1501 	avl->avl_sequence++;
1502 	rw_exit(&avl->avl_lock);
1503 
1504 	smb_avl_rele(avl);
1505 	return (0);
1506 }
1507 
1508 /*
1509  * Removes the given item from the AVL.
1510  * If no reference is left on the item
1511  * it will also be destroyed by calling the
1512  * registered destroy operation.
1513  */
1514 void
smb_avl_remove(smb_avl_t * avl,void * item)1515 smb_avl_remove(smb_avl_t *avl, void *item)
1516 {
1517 	avl_index_t where;
1518 	void *rm_item;
1519 
1520 	ASSERT(avl);
1521 	ASSERT(item);
1522 
1523 	if (!smb_avl_hold(avl))
1524 		return;
1525 
1526 	rw_enter(&avl->avl_lock, RW_WRITER);
1527 	if ((rm_item = avl_find(&avl->avl_tree, item, &where)) == NULL) {
1528 		rw_exit(&avl->avl_lock);
1529 		smb_avl_rele(avl);
1530 		return;
1531 	}
1532 
1533 	avl_remove(&avl->avl_tree, rm_item);
1534 	if (avl->avl_nops->avln_rele(rm_item))
1535 		avl->avl_nops->avln_destroy(rm_item);
1536 	avl->avl_sequence++;
1537 	rw_exit(&avl->avl_lock);
1538 
1539 	smb_avl_rele(avl);
1540 }
1541 
1542 /*
1543  * Looks up the AVL for the given item.
1544  * If the item is found a hold on the object
1545  * is taken before the pointer to it is
1546  * returned to the caller. The caller MUST
1547  * always call smb_avl_release() after it's done
1548  * using the returned object to release the hold
1549  * taken on the object.
1550  */
1551 void *
smb_avl_lookup(smb_avl_t * avl,void * item)1552 smb_avl_lookup(smb_avl_t *avl, void *item)
1553 {
1554 	void *node = NULL;
1555 
1556 	ASSERT(avl);
1557 	ASSERT(item);
1558 
1559 	if (!smb_avl_hold(avl))
1560 		return (NULL);
1561 
1562 	rw_enter(&avl->avl_lock, RW_READER);
1563 	node = avl_find(&avl->avl_tree, item, NULL);
1564 	if (node != NULL)
1565 		avl->avl_nops->avln_hold(node);
1566 	rw_exit(&avl->avl_lock);
1567 
1568 	if (node == NULL)
1569 		smb_avl_rele(avl);
1570 
1571 	return (node);
1572 }
1573 
/*
 * The hold on the given object is released.
 * This function MUST always be called after
 * smb_avl_lookup() and smb_avl_iterate() for
 * the returned object.
 *
 * If AVL is in DESTROYING state, the destroying
 * thread will be notified.
 */
void
smb_avl_release(smb_avl_t *avl, void *item)
{
	ASSERT(avl);
	ASSERT(item);

	/* Drop the node hold; destroy it when the last hold is gone. */
	if (avl->avl_nops->avln_rele(item))
		avl->avl_nops->avln_destroy(item);

	/* Drop the AVL hold taken by lookup/iterate. */
	smb_avl_rele(avl);
}
1594 
/*
 * Initializes the given cursor for the AVL.
 * The cursor will be used to iterate through the AVL
 */
void
smb_avl_iterinit(smb_avl_t *avl, smb_avl_cursor_t *cursor)
{
	ASSERT(avl);
	ASSERT(cursor);

	/* NULL means "start from the first node" in smb_avl_iterate(). */
	cursor->avlc_next = NULL;
	/*
	 * Snapshot the tree's modification sequence number; if it changes,
	 * smb_avl_iterate() terminates the walk early.
	 */
	cursor->avlc_sequence = avl->avl_sequence;
}
1608 
/*
 * Iterates through the AVL using the given cursor.
 * It always starts at the beginning and then returns
 * a pointer to the next object on each subsequent call.
 *
 * If a new object is added to or removed from the AVL
 * between two calls to this function, the iteration
 * will terminate prematurely.
 *
 * The caller MUST always call smb_avl_release() after it's
 * done using the returned object to release the hold taken
 * on the object.
 */
void *
smb_avl_iterate(smb_avl_t *avl, smb_avl_cursor_t *cursor)
{
	void *node;

	ASSERT(avl);
	ASSERT(cursor);

	if (!smb_avl_hold(avl))
		return (NULL);

	rw_enter(&avl->avl_lock, RW_READER);
	/* Tree changed since iterinit/last call: stop the walk. */
	if (cursor->avlc_sequence != avl->avl_sequence) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return (NULL);
	}

	if (cursor->avlc_next == NULL)
		node = avl_first(&avl->avl_tree);
	else
		node = AVL_NEXT(&avl->avl_tree, cursor->avlc_next);

	/* Hold the node; the caller releases it via smb_avl_release(). */
	if (node != NULL)
		avl->avl_nops->avln_hold(node);

	cursor->avlc_next = node;
	rw_exit(&avl->avl_lock);

	/* End of walk: no node to release later, so drop the AVL hold. */
	if (node == NULL)
		smb_avl_rele(avl);

	return (node);
}
1656 
1657 /*
1658  * Increments the AVL reference count in order to
1659  * prevent the avl from being destroyed while it's
1660  * being accessed.
1661  */
1662 static boolean_t
smb_avl_hold(smb_avl_t * avl)1663 smb_avl_hold(smb_avl_t *avl)
1664 {
1665 	mutex_enter(&avl->avl_mutex);
1666 	if (avl->avl_state != SMB_AVL_STATE_READY) {
1667 		mutex_exit(&avl->avl_mutex);
1668 		return (B_FALSE);
1669 	}
1670 	avl->avl_refcnt++;
1671 	mutex_exit(&avl->avl_mutex);
1672 
1673 	return (B_TRUE);
1674 }
1675 
/*
 * Decrements the AVL reference count to release the
 * hold. If another thread is trying to destroy the
 * AVL and is waiting for the reference count to become
 * 0, it is signaled to wake up.
 */
static void
smb_avl_rele(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	ASSERT(avl->avl_refcnt > 0);
	avl->avl_refcnt--;
	/* Wake the destroyer waiting in smb_avl_destroy(), if any. */
	if (avl->avl_state == SMB_AVL_STATE_DESTROYING)
		cv_broadcast(&avl->avl_cv);
	mutex_exit(&avl->avl_mutex);
}
1692 
/*
 * smb_latency_init
 *
 * Zero the latency statistics and initialize the protecting lock.
 * NOTE(review): MUTEX_SPIN at SPL7 suggests samples may be added from
 * high-level interrupt context -- confirm against callers.
 */
void
smb_latency_init(smb_latency_t *lat)
{
	bzero(lat, sizeof (*lat));
	mutex_init(&lat->ly_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
}
1702 
/*
 * smb_latency_destroy
 *
 * Tear down the lock created by smb_latency_init().
 */
void
smb_latency_destroy(smb_latency_t *lat)
{
	mutex_destroy(&lat->ly_mutex);
}
1711 
1712 /*
1713  * smb_latency_add_sample
1714  *
1715  * Uses the new sample to calculate the new mean and standard deviation. The
1716  * sample must be a scaled value.
1717  */
1718 void
smb_latency_add_sample(smb_latency_t * lat,hrtime_t sample)1719 smb_latency_add_sample(smb_latency_t *lat, hrtime_t sample)
1720 {
1721 	hrtime_t	a_mean;
1722 	hrtime_t	d_mean;
1723 
1724 	mutex_enter(&lat->ly_mutex);
1725 	lat->ly_a_nreq++;
1726 	lat->ly_a_sum += sample;
1727 	if (lat->ly_a_nreq != 0) {
1728 		a_mean = lat->ly_a_sum / lat->ly_a_nreq;
1729 		lat->ly_a_stddev =
1730 		    (sample - a_mean) * (sample - lat->ly_a_mean);
1731 		lat->ly_a_mean = a_mean;
1732 	}
1733 	lat->ly_d_nreq++;
1734 	lat->ly_d_sum += sample;
1735 	if (lat->ly_d_nreq != 0) {
1736 		d_mean = lat->ly_d_sum / lat->ly_d_nreq;
1737 		lat->ly_d_stddev =
1738 		    (sample - d_mean) * (sample - lat->ly_d_mean);
1739 		lat->ly_d_mean = d_mean;
1740 	}
1741 	mutex_exit(&lat->ly_mutex);
1742 }
1743 
/*
 * smb_srqueue_init
 *
 * Zero the queue statistics, initialize the spin lock, and seed the
 * wait/run "last update" timestamps with the current unscaled hrtime
 * so the first delta computed by the enter/exit routines is sane.
 */
void
smb_srqueue_init(smb_srqueue_t *srq)
{
	bzero(srq, sizeof (*srq));
	mutex_init(&srq->srq_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
	srq->srq_wlastupdate = srq->srq_rlastupdate = gethrtime_unscaled();
}
1754 
/*
 * smb_srqueue_destroy
 *
 * Tear down the lock created by smb_srqueue_init().
 */
void
smb_srqueue_destroy(smb_srqueue_t *srq)
{
	mutex_destroy(&srq->srq_mutex);
}
1763 
/*
 * smb_srqueue_waitq_enter
 *
 * Account for a request entering the wait queue: bring the wait-queue
 * accumulators up to "now", then bump the queue depth.  Times are
 * unscaled hrtime; smb_srqueue_update() scales them for consumers.
 */
void
smb_srqueue_waitq_enter(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	wcnt = srq->srq_wcnt++;	/* wcnt is the depth before this arrival */
	if (wcnt != 0) {
		/* Queue was non-empty at depth wcnt for 'delta'. */
		srq->srq_wlentime += delta * wcnt;
		srq->srq_wtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}
1785 
/*
 * smb_srqueue_runq_exit
 *
 * Account for a request leaving the run queue: bring the run-queue
 * accumulators up to "now", then drop the queue depth.
 */
void
smb_srqueue_runq_exit(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	rcnt = srq->srq_rcnt--;	/* rcnt is the depth before this departure */
	ASSERT(rcnt > 0);
	/* Queue was busy at depth rcnt for 'delta'. */
	srq->srq_rlentime += delta * rcnt;
	srq->srq_rtime += delta;
	mutex_exit(&srq->srq_mutex);
}
1806 
/*
 * smb_srqueue_waitq_to_runq
 *
 * Move a request from the wait queue to the run queue: close out the
 * wait-queue interval (depth before the departure) and open a
 * run-queue interval (depth before the arrival), both at the same
 * timestamp.
 */
void
smb_srqueue_waitq_to_runq(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	wcnt = srq->srq_wcnt--;	/* depth before this departure */
	ASSERT(wcnt > 0);
	srq->srq_wlentime += delta * wcnt;
	srq->srq_wtime += delta;
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	rcnt = srq->srq_rcnt++;	/* depth before this arrival */
	if (rcnt != 0) {
		srq->srq_rlentime += delta * rcnt;
		srq->srq_rtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}
1835 
/*
 * smb_srqueue_update
 *
 * Takes a snapshot of the smb_sr_stat_t structure passed in.
 * Brings both queues' accumulators up to a common snapshot time,
 * copies them into *kd, then converts the copies from unscaled to
 * scaled hrtime (outside the spin lock, since scalehrtime may be
 * comparatively slow).
 */
void
smb_srqueue_update(smb_srqueue_t *srq, smb_kstat_utilization_t *kd)
{
	hrtime_t	delta;
	hrtime_t	snaptime;

	mutex_enter(&srq->srq_mutex);
	snaptime = gethrtime_unscaled();
	/* Close out the wait-queue interval at the snapshot time. */
	delta = snaptime - srq->srq_wlastupdate;
	srq->srq_wlastupdate = snaptime;
	if (srq->srq_wcnt != 0) {
		srq->srq_wlentime += delta * srq->srq_wcnt;
		srq->srq_wtime += delta;
	}
	/* Likewise for the run queue. */
	delta = snaptime - srq->srq_rlastupdate;
	srq->srq_rlastupdate = snaptime;
	if (srq->srq_rcnt != 0) {
		srq->srq_rlentime += delta * srq->srq_rcnt;
		srq->srq_rtime += delta;
	}
	kd->ku_rlentime = srq->srq_rlentime;
	kd->ku_rtime = srq->srq_rtime;
	kd->ku_wlentime = srq->srq_wlentime;
	kd->ku_wtime = srq->srq_wtime;
	mutex_exit(&srq->srq_mutex);
	/* Scale the copies after dropping the lock. */
	scalehrtime(&kd->ku_rlentime);
	scalehrtime(&kd->ku_rtime);
	scalehrtime(&kd->ku_wlentime);
	scalehrtime(&kd->ku_wtime);
}
1871 
1872 void
smb_threshold_init(smb_cmd_threshold_t * ct,char * cmd,uint_t threshold,uint_t timeout)1873 smb_threshold_init(smb_cmd_threshold_t *ct, char *cmd,
1874     uint_t threshold, uint_t timeout)
1875 {
1876 	bzero(ct, sizeof (smb_cmd_threshold_t));
1877 	mutex_init(&ct->ct_mutex, NULL, MUTEX_DEFAULT, NULL);
1878 	cv_init(&ct->ct_cond, NULL, CV_DEFAULT, NULL);
1879 
1880 	ct->ct_cmd = cmd;
1881 	ct->ct_threshold = threshold;
1882 	ct->ct_timeout = timeout;
1883 }
1884 
/*
 * Tear down a command threshold.  Assumes no threads remain blocked
 * in smb_threshold_enter() (a cv_destroy requirement).
 */
void
smb_threshold_fini(smb_cmd_threshold_t *ct)
{
	cv_destroy(&ct->ct_cond);
	mutex_destroy(&ct->ct_mutex);
}
1891 
/*
 * This threshold mechanism is used to limit the number of simultaneous
 * named pipe connections, concurrent authentication conversations, etc.
 * Requests that would take us over the threshold wait until either the
 * resources are available (return zero) or timeout (return error).
 */
int
smb_threshold_enter(smb_cmd_threshold_t *ct)
{
	clock_t	time, rem;

	/* Absolute deadline (in lbolt ticks) for all waits below. */
	time = MSEC_TO_TICK(ct->ct_timeout) + ddi_get_lbolt();
	mutex_enter(&ct->ct_mutex);

	/*
	 * ct_threshold == 0 means the facility was shut down via
	 * smb_threshold_wake_all(); it is re-checked after the loop
	 * because the wakeup may have been exactly that broadcast.
	 */
	while (ct->ct_threshold != 0 &&
	    ct->ct_threshold <= ct->ct_active_cnt) {
		ct->ct_blocked_cnt++;
		rem = cv_timedwait(&ct->ct_cond, &ct->ct_mutex, time);
		ct->ct_blocked_cnt--;
		if (rem < 0) {
			/* Deadline passed without getting a slot. */
			mutex_exit(&ct->ct_mutex);
			return (ETIME);
		}
	}
	if (ct->ct_threshold == 0) {
		mutex_exit(&ct->ct_mutex);
		return (ECANCELED);
	}

	ASSERT3U(ct->ct_active_cnt, <, ct->ct_threshold);
	ct->ct_active_cnt++;

	mutex_exit(&ct->ct_mutex);
	return (0);
}
1927 
/*
 * Release the slot taken by a successful smb_threshold_enter() and
 * wake one blocked waiter, if any.
 */
void
smb_threshold_exit(smb_cmd_threshold_t *ct)
{
	mutex_enter(&ct->ct_mutex);
	ASSERT3U(ct->ct_active_cnt, >, 0);
	ct->ct_active_cnt--;
	if (ct->ct_blocked_cnt)
		cv_signal(&ct->ct_cond);
	mutex_exit(&ct->ct_mutex);
}
1938 
/*
 * Shut the threshold down: clearing ct_threshold makes blocked and
 * subsequent smb_threshold_enter() calls return ECANCELED, and the
 * broadcast releases everyone currently waiting.
 */
void
smb_threshold_wake_all(smb_cmd_threshold_t *ct)
{
	mutex_enter(&ct->ct_mutex);
	ct->ct_threshold = 0;
	cv_broadcast(&ct->ct_cond);
	mutex_exit(&ct->ct_mutex);
}
1947 
1948 /* taken from mod_hash_byptr */
1949 uint_t
smb_hash_uint64(smb_hash_t * hash,uint64_t val)1950 smb_hash_uint64(smb_hash_t *hash, uint64_t val)
1951 {
1952 	uint64_t k = val >> hash->rshift;
1953 	uint_t idx = ((uint_t)k) & (hash->num_buckets - 1);
1954 
1955 	return (idx);
1956 }
1957 
1958 boolean_t
smb_is_pow2(size_t n)1959 smb_is_pow2(size_t n)
1960 {
1961 	return ((n & (n - 1)) == 0);
1962 }
1963 
1964 smb_hash_t *
smb_hash_create(size_t elemsz,size_t link_offset,uint32_t num_buckets)1965 smb_hash_create(size_t elemsz, size_t link_offset,
1966     uint32_t num_buckets)
1967 {
1968 	smb_hash_t *hash = kmem_alloc(sizeof (*hash), KM_SLEEP);
1969 	int i;
1970 
1971 	if (!smb_is_pow2(num_buckets))
1972 		num_buckets = 1 << highbit(num_buckets);
1973 
1974 	hash->rshift = highbit(elemsz);
1975 	hash->num_buckets = num_buckets;
1976 	hash->buckets = kmem_zalloc(num_buckets * sizeof (smb_bucket_t),
1977 	    KM_SLEEP);
1978 	for (i = 0; i < num_buckets; i++)
1979 		smb_llist_constructor(&hash->buckets[i].b_list, elemsz,
1980 		    link_offset);
1981 	return (hash);
1982 }
1983 
1984 void
smb_hash_destroy(smb_hash_t * hash)1985 smb_hash_destroy(smb_hash_t *hash)
1986 {
1987 	int i;
1988 
1989 	for (i = 0; i < hash->num_buckets; i++)
1990 		smb_llist_destructor(&hash->buckets[i].b_list);
1991 
1992 	kmem_free(hash->buckets, hash->num_buckets * sizeof (smb_bucket_t));
1993 	kmem_free(hash, sizeof (*hash));
1994 }
1995