xref: /illumos-gate/usr/src/uts/common/fs/smbsrv/smb_kutil.c (revision d48be21240dfd051b689384ce2b23479d757f2d8)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
25  */
26 
27 #include <sys/param.h>
28 #include <sys/types.h>
29 #include <sys/tzfile.h>
30 #include <sys/atomic.h>
31 #include <sys/time.h>
32 #include <sys/spl.h>
33 #include <sys/random.h>
34 #include <smbsrv/smb_kproto.h>
35 #include <smbsrv/smb_fsops.h>
36 #include <smbsrv/smbinfo.h>
37 #include <smbsrv/smb_xdr.h>
38 #include <smbsrv/smb_vops.h>
39 #include <smbsrv/smb_idmap.h>
40 
41 #include <sys/sid.h>
42 #include <sys/priv_names.h>
43 #include <sys/bitmap.h>
44 
/* Cache for smb_dtor_t nodes posted to llist delete queues. */
static kmem_cache_t	*smb_dtor_cache = NULL;

static boolean_t smb_avl_hold(smb_avl_t *);
static void smb_avl_rele(smb_avl_t *);

/*
 * Leap-second adjustment applied by smb_gmtime_r()/smb_timegm().
 * NOTE(review): appears to always be 0 here — confirm whether any
 * other code updates it.
 */
time_t tzh_leapcnt = 0;

struct tm
*smb_gmtime_r(time_t *clock, struct tm *result);

time_t
smb_timegm(struct tm *tm);

/* Local broken-down time; the kernel has no <time.h> struct tm. */
struct	tm {
	int	tm_sec;		/* seconds, 0..59 (2-sec granularity in DOS) */
	int	tm_min;		/* minutes, 0..59 */
	int	tm_hour;	/* hours, 0..23 */
	int	tm_mday;	/* day of month, 1..31 */
	int	tm_mon;		/* month, 0..11 */
	int	tm_year;	/* years since 1900 */
	int	tm_wday;	/* day of week, 0 = Sunday */
	int	tm_yday;	/* day of year, 0..365 */
	int	tm_isdst;	/* always 0 here; no TZ handling */
};

/* Days per month in a non-leap year; February handled separately. */
static const int days_in_month[] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
73 
74 /*
75  * Given a UTF-8 string (our internal form everywhere)
76  * return either the Unicode (UTF-16) length in bytes,
77  * or the OEM length in bytes.  Which we return is
78  * determined by whether the client supports Unicode.
79  * This length does NOT include the null.
80  */
81 int
82 smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
83 {
84 	if (sr->session->dialect >= SMB_VERS_2_BASE ||
85 	    (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
86 		return (smb_wcequiv_strlen(str));
87 	return (smb_sbequiv_strlen(str));
88 }
89 
90 /*
91  * Given a UTF-8 string (our internal form everywhere)
92  * return either the Unicode (UTF-16) length in bytes,
93  * or the OEM length in bytes.  Which we return is
94  * determined by whether the client supports Unicode.
95  * This length DOES include the null.
96  */
97 int
98 smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
99 {
100 	if (sr->session->dialect >= SMB_VERS_2_BASE ||
101 	    (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
102 		return (smb_wcequiv_strlen(str) + 2);
103 	return (smb_sbequiv_strlen(str) + 1);
104 }
105 
106 int
107 smb_ascii_or_unicode_null_len(struct smb_request *sr)
108 {
109 	if (sr->session->dialect >= SMB_VERS_2_BASE ||
110 	    (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
111 		return (2);
112 	return (1);
113 }
114 
/*
 * Convert old-style (DOS, LanMan) wildcard strings to NT style.
 * This should ONLY happen to patterns that come from old clients,
 * meaning dialect LANMAN2_1 etc. (dialect < NT_LM_0_12).
 *
 *	? is converted to >
 *	* is converted to < if it is followed by .
 *	. is converted to " if it is followed by ? or * or end of pattern
 *
 * Note: modifies pattern in place.
 */
void
smb_convert_wildcards(char *pattern)
{
	char	*cp;

	for (cp = pattern; *cp != '\0'; cp++) {
		char ch = *cp;

		if (ch == '?') {
			*cp = '>';
		} else if (ch == '*' && cp[1] == '.') {
			*cp = '<';
		} else if (ch == '.' &&
		    (cp[1] == '?' || cp[1] == '*' || cp[1] == '\0')) {
			*cp = '\"';
		}
	}
}
148 
149 /*
150  * smb_sattr_check
151  *
152  * Check file attributes against a search attribute (sattr) mask.
153  *
154  * Normal files, which includes READONLY and ARCHIVE, always pass
155  * this check.  If the DIRECTORY, HIDDEN or SYSTEM special attributes
156  * are set then they must appear in the search mask.  The special
157  * attributes are inclusive, i.e. all special attributes that appear
158  * in sattr must also appear in the file attributes for the check to
159  * pass.
160  *
161  * The following examples show how this works:
162  *
163  *		fileA:	READONLY
164  *		fileB:	0 (no attributes = normal file)
165  *		fileC:	READONLY, ARCHIVE
166  *		fileD:	HIDDEN
167  *		fileE:	READONLY, HIDDEN, SYSTEM
168  *		dirA:	DIRECTORY
169  *
170  * search attribute: 0
171  *		Returns: fileA, fileB and fileC.
172  * search attribute: HIDDEN
173  *		Returns: fileA, fileB, fileC and fileD.
174  * search attribute: SYSTEM
175  *		Returns: fileA, fileB and fileC.
176  * search attribute: DIRECTORY
177  *		Returns: fileA, fileB, fileC and dirA.
178  * search attribute: HIDDEN and SYSTEM
179  *		Returns: fileA, fileB, fileC, fileD and fileE.
180  *
181  * Returns true if the file and sattr match; otherwise, returns false.
182  */
183 boolean_t
184 smb_sattr_check(uint16_t dosattr, uint16_t sattr)
185 {
186 	if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
187 	    !(sattr & FILE_ATTRIBUTE_DIRECTORY))
188 		return (B_FALSE);
189 
190 	if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
191 	    !(sattr & FILE_ATTRIBUTE_HIDDEN))
192 		return (B_FALSE);
193 
194 	if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
195 	    !(sattr & FILE_ATTRIBUTE_SYSTEM))
196 		return (B_FALSE);
197 
198 	return (B_TRUE);
199 }
200 
201 time_t
202 smb_get_boottime(void)
203 {
204 	extern time_t	boot_time;
205 	zone_t *z = curzone;
206 
207 	/* Unfortunately, the GZ doesn't set zone_boot_time. */
208 	if (z->zone_id == GLOBAL_ZONEID)
209 		return (boot_time);
210 
211 	return (z->zone_boot_time);
212 }
213 
/*
 * smb_idpool_increment
 *
 * Grow the ID pool by doubling its current size (in bits; the
 * bitmap itself is size/8 bytes).  The caller must hold the
 * pool mutex.
 *
 * Returns 0 on success, -1 if the pool is already at
 * SMB_IDPOOL_MAX_SIZE or the new bitmap cannot be allocated.
 */
static int
smb_idpool_increment(
    smb_idpool_t	*pool)
{
	uint8_t		*new_pool;
	uint32_t	new_size;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	new_size = pool->id_size * 2;
	if (new_size <= SMB_IDPOOL_MAX_SIZE) {
		/* One bit per ID, so the bitmap is new_size / 8 bytes. */
		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
		if (new_pool) {
			bzero(new_pool, new_size / 8);
			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
			kmem_free(pool->id_pool, pool->id_size / 8);
			pool->id_pool = new_pool;
			pool->id_free_counter += new_size - pool->id_size;
			pool->id_max_free_counter += new_size - pool->id_size;
			pool->id_size = new_size;
			pool->id_idx_msk = (new_size / 8) - 1;
			if (new_size >= SMB_IDPOOL_MAX_SIZE) {
				/* id -1 (0xFFFF) made unavailable */
				pool->id_pool[pool->id_idx_msk] = 0x80;
				pool->id_free_counter--;
				pool->id_max_free_counter--;
			}
			return (0);
		}
	}
	return (-1);
}
252 
/*
 * smb_idpool_constructor
 *
 * Initialize an ID pool: a bitmap of SMB_IDPOOL_MIN_SIZE bits in
 * which a set bit marks an ID as in use.  ID 0 is reserved up
 * front; ID 0xFFFF is reserved when the pool reaches its maximum
 * size (see smb_idpool_increment).  Always returns 0.
 */
int
smb_idpool_constructor(
    smb_idpool_t	*pool)
{

	ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);

	pool->id_size = SMB_IDPOOL_MIN_SIZE;
	pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
	pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	/* Start the allocation scan at bit 1 of byte 0 (ID 1). */
	pool->id_bit = 0x02;
	pool->id_bit_idx = 1;
	pool->id_idx = 0;
	pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
	    KM_SLEEP);
	bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
	/* -1 id made unavailable */
	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
	pool->id_magic = SMB_IDPOOL_MAGIC;
	return (0);
}
281 
/*
 * smb_idpool_destructor
 *
 * Tear down and free the resources associated with the pool.
 * All IDs must have been returned first: the free counter must
 * equal the max free counter.
 */
void
smb_idpool_destructor(
    smb_idpool_t	*pool)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
	/* Invalidate the magic so late users trip the ASSERT above. */
	pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
	mutex_destroy(&pool->id_mutex);
	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
}
298 
/*
 * smb_idpool_alloc
 *
 * Allocate an ID from the pool: scan the bitmap for a clear bit
 * starting from the position after the previous allocation, set
 * it, and return the corresponding ID through *id.  Grows the
 * pool first if no IDs are free.
 *
 * Returns 0 on success, -1 if the pool is full and cannot grow.
 */
int
smb_idpool_alloc(
    smb_idpool_t	*pool,
    uint16_t		*id)
{
	uint32_t	i;
	uint8_t		bit;
	uint8_t		bit_idx;
	uint8_t		byte;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	mutex_enter(&pool->id_mutex);
	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
		mutex_exit(&pool->id_mutex);
		return (-1);
	}

	/* Walk the bitmap (wrapping via id_idx_msk) looking for a 0 bit. */
	i = pool->id_size;
	while (i) {
		bit = pool->id_bit;
		bit_idx = pool->id_bit_idx;
		byte = pool->id_pool[pool->id_idx];
		while (bit) {
			if (byte & bit) {
				/* ID in use; try the next bit in this byte. */
				bit = bit << 1;
				bit_idx++;
				continue;
			}
			pool->id_pool[pool->id_idx] |= bit;
			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
			pool->id_free_counter--;
			/*
			 * Leave position at next bit to allocate,
			 * so we don't keep re-using the last in an
			 * alloc/free/alloc/free sequence.  Doing
			 * that can confuse some SMB clients.
			 */
			if (bit & 0x80) {
				pool->id_bit = 1;
				pool->id_bit_idx = 0;
				pool->id_idx++;
				pool->id_idx &= pool->id_idx_msk;
			} else {
				pool->id_bit = (bit << 1);
				pool->id_bit_idx = bit_idx + 1;
				/* keep id_idx */
			}
			mutex_exit(&pool->id_mutex);
			return (0);
		}
		/* Byte exhausted; advance to the next byte, wrapping. */
		pool->id_bit = 1;
		pool->id_bit_idx = 0;
		pool->id_idx++;
		pool->id_idx &= pool->id_idx_msk;
		--i;
	}
	/*
	 * This section of code shouldn't be reached. If there are IDs
	 * available and none could be found there's a problem.
	 */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
	return (-1);
}
369 
/*
 * smb_idpool_free
 *
 * Return an ID to the pool by clearing its bit in the bitmap.
 * IDs 0 and 0xFFFF are reserved and must never be freed.
 */
void
smb_idpool_free(
    smb_idpool_t	*pool,
    uint16_t		id)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(id != 0);
	ASSERT(id != 0xFFFF);

	mutex_enter(&pool->id_mutex);
	/* Byte id >> 3, bit id & 7; the bit must currently be set. */
	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
		pool->id_free_counter++;
		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
		mutex_exit(&pool->id_mutex);
		return;
	}
	/* Freeing a free ID. */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
}
396 
397 /*
398  * Initialize the llist delete queue object cache.
399  */
400 void
401 smb_llist_init(void)
402 {
403 	if (smb_dtor_cache != NULL)
404 		return;
405 
406 	smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
407 	    sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
408 }
409 
410 /*
411  * Destroy the llist delete queue object cache.
412  */
413 void
414 smb_llist_fini(void)
415 {
416 	if (smb_dtor_cache != NULL) {
417 		kmem_cache_destroy(smb_dtor_cache);
418 		smb_dtor_cache = NULL;
419 	}
420 }
421 
/*
 * smb_llist_constructor
 *
 * Initialize a locked list: an rwlock-protected list_t holding
 * elements of the given size/offset, plus a mutex-protected
 * delete queue of smb_dtor_t entries processed on exit/flush.
 */
void
smb_llist_constructor(
    smb_llist_t	*ll,
    size_t	size,
    size_t	offset)
{
	rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
	list_create(&ll->ll_list, size, offset);
	list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
	    offsetof(smb_dtor_t, dt_lnd));
	ll->ll_count = 0;
	ll->ll_wrop = 0;
	ll->ll_deleteq_count = 0;
	ll->ll_flushing = B_FALSE;
}
443 
/*
 * Flush the delete queue and destroy a locked list.
 * The list itself must already be empty (ll_count == 0);
 * the flush drains any pending destructors first.
 */
void
smb_llist_destructor(
    smb_llist_t	*ll)
{
	smb_llist_flush(ll);

	ASSERT(ll->ll_count == 0);
	ASSERT(ll->ll_deleteq_count == 0);

	rw_destroy(&ll->ll_lock);
	list_destroy(&ll->ll_list);
	list_destroy(&ll->ll_deleteq);
	mutex_destroy(&ll->ll_mutex);
}
461 
/*
 * smb_llist_enter
 *
 * Acquire the list's rwlock in the given mode.
 * Not a macro so dtrace smbsrv:* can see it.
 */
void
smb_llist_enter(smb_llist_t *ll, krw_t mode)
{
	rw_enter(&ll->ll_lock, mode);
}
471 
/*
 * Post an object to the delete queue.  The delete queue will be processed
 * during list exit or list destruction.  Objects are often posted for
 * deletion during list iteration (while the list is locked) but that is
 * not required, and an object can be posted at any time.
 *
 * KM_SLEEP allocation: this may block, so don't call it from a
 * context that cannot sleep.
 */
void
smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
{
	smb_dtor_t	*dtor;

	ASSERT((object != NULL) && (dtorproc != NULL));

	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
	bzero(dtor, sizeof (smb_dtor_t));
	dtor->dt_magic = SMB_DTOR_MAGIC;
	dtor->dt_object = object;
	dtor->dt_proc = dtorproc;

	mutex_enter(&ll->ll_mutex);
	list_insert_tail(&ll->ll_deleteq, dtor);
	++ll->ll_deleteq_count;
	mutex_exit(&ll->ll_mutex);
}
496 
/*
 * Exit the list lock and process the delete queue.
 * Destructors queued via smb_llist_post() run here, after the
 * rwlock is dropped, so they may safely re-enter the list.
 */
void
smb_llist_exit(smb_llist_t *ll)
{
	rw_exit(&ll->ll_lock);
	smb_llist_flush(ll);
}
506 
/*
 * Flush the list delete queue.  The mutex is dropped across the destructor
 * call in case this leads to additional objects being posted to the delete
 * queue.
 *
 * ll_flushing guards against recursive/concurrent flushes: if another
 * thread is already draining the queue, this call returns immediately
 * and that thread will also pick up anything posted meanwhile.
 */
void
smb_llist_flush(smb_llist_t *ll)
{
	smb_dtor_t    *dtor;

	mutex_enter(&ll->ll_mutex);
	if (ll->ll_flushing) {
		mutex_exit(&ll->ll_mutex);
		return;
	}
	ll->ll_flushing = B_TRUE;

	dtor = list_head(&ll->ll_deleteq);
	while (dtor != NULL) {
		SMB_DTOR_VALID(dtor);
		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
		list_remove(&ll->ll_deleteq, dtor);
		--ll->ll_deleteq_count;
		mutex_exit(&ll->ll_mutex);

		/* Run the destructor unlocked; it may post new entries. */
		dtor->dt_proc(dtor->dt_object);

		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
		kmem_cache_free(smb_dtor_cache, dtor);
		mutex_enter(&ll->ll_mutex);
		dtor = list_head(&ll->ll_deleteq);
	}
	ll->ll_flushing = B_FALSE;

	mutex_exit(&ll->ll_mutex);
}
543 
544 /*
545  * smb_llist_upgrade
546  *
547  * This function tries to upgrade the lock of the locked list. It assumes the
548  * locked has already been entered in RW_READER mode. It first tries using the
549  * Solaris function rw_tryupgrade(). If that call fails the lock is released
550  * and reentered in RW_WRITER mode. In that last case a window is opened during
551  * which the contents of the list may have changed. The return code indicates
552  * whether or not the list was modified when the lock was exited.
553  */
554 int smb_llist_upgrade(
555     smb_llist_t *ll)
556 {
557 	uint64_t	wrop;
558 
559 	if (rw_tryupgrade(&ll->ll_lock) != 0) {
560 		return (0);
561 	}
562 	wrop = ll->ll_wrop;
563 	rw_exit(&ll->ll_lock);
564 	rw_enter(&ll->ll_lock, RW_WRITER);
565 	return (wrop != ll->ll_wrop);
566 }
567 
568 /*
569  * smb_llist_insert_head
570  *
571  * This function inserts the object passed a the beginning of the list. This
572  * function assumes the lock of the list has already been entered.
573  */
574 void
575 smb_llist_insert_head(
576     smb_llist_t	*ll,
577     void	*obj)
578 {
579 	list_insert_head(&ll->ll_list, obj);
580 	++ll->ll_wrop;
581 	++ll->ll_count;
582 }
583 
584 /*
585  * smb_llist_insert_tail
586  *
587  * This function appends to the object passed to the list. This function assumes
588  * the lock of the list has already been entered.
589  *
590  */
591 void
592 smb_llist_insert_tail(
593     smb_llist_t	*ll,
594     void	*obj)
595 {
596 	list_insert_tail(&ll->ll_list, obj);
597 	++ll->ll_wrop;
598 	++ll->ll_count;
599 }
600 
601 /*
602  * smb_llist_remove
603  *
604  * This function removes the object passed from the list. This function assumes
605  * the lock of the list has already been entered.
606  */
607 void
608 smb_llist_remove(
609     smb_llist_t	*ll,
610     void	*obj)
611 {
612 	list_remove(&ll->ll_list, obj);
613 	++ll->ll_wrop;
614 	--ll->ll_count;
615 }
616 
/*
 * smb_llist_get_count
 *
 * Return the number of elements in the specified list.
 * Read without the lock; callers wanting a stable value
 * must hold the list lock themselves.
 */
uint32_t
smb_llist_get_count(
    smb_llist_t *ll)
{
	return (ll->ll_count);
}
628 
/*
 * smb_slist_constructor
 *
 * Synchronized list constructor: a mutex-protected list_t with a
 * condition variable used to wait for the list to drain
 * (see smb_slist_wait_for_empty).
 */
void
smb_slist_constructor(
    smb_slist_t	*sl,
    size_t	size,
    size_t	offset)
{
	mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
	list_create(&sl->sl_list, size, offset);
	sl->sl_count = 0;
	sl->sl_waiting = B_FALSE;
}
646 
/*
 * smb_slist_destructor
 *
 * Synchronized list destructor.  The list must be empty.
 */
void
smb_slist_destructor(
    smb_slist_t	*sl)
{
	VERIFY(sl->sl_count == 0);

	mutex_destroy(&sl->sl_mutex);
	cv_destroy(&sl->sl_cv);
	list_destroy(&sl->sl_list);
}
662 
663 /*
664  * smb_slist_enter
665  * Not a macro so dtrace smbsrv:* can see it.
666  */
667 void
668 smb_slist_enter(smb_slist_t *sl)
669 {
670 	mutex_enter(&(sl)->sl_mutex);
671 }
672 
673 /*
674  * smb_slist_insert_head
675  *
676  * This function inserts the object passed a the beginning of the list.
677  */
678 void
679 smb_slist_insert_head(
680     smb_slist_t	*sl,
681     void	*obj)
682 {
683 	mutex_enter(&sl->sl_mutex);
684 	list_insert_head(&sl->sl_list, obj);
685 	++sl->sl_count;
686 	mutex_exit(&sl->sl_mutex);
687 }
688 
689 /*
690  * smb_slist_insert_tail
691  *
692  * This function appends the object passed to the list.
693  */
694 void
695 smb_slist_insert_tail(
696     smb_slist_t	*sl,
697     void	*obj)
698 {
699 	mutex_enter(&sl->sl_mutex);
700 	list_insert_tail(&sl->sl_list, obj);
701 	++sl->sl_count;
702 	mutex_exit(&sl->sl_mutex);
703 }
704 
/*
 * smb_slist_remove
 *
 * Remove the given object from the synchronized list.  If this
 * empties the list, wake any thread blocked in
 * smb_slist_wait_for_empty().
 */
void
smb_slist_remove(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_remove(&sl->sl_list, obj);
	if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}
723 
/*
 * smb_slist_move_tail
 *
 * Transfer all the contents of the synchronized list to the
 * list_t provided, waking any empty-waiters since the source
 * list drains.  Returns the number of objects transferred.
 */
uint32_t
smb_slist_move_tail(
    list_t	*lst,
    smb_slist_t	*sl)
{
	uint32_t	rv;

	mutex_enter(&sl->sl_mutex);
	rv = sl->sl_count;
	if (sl->sl_count) {
		list_move_tail(lst, &sl->sl_list);
		sl->sl_count = 0;
		if (sl->sl_waiting) {
			sl->sl_waiting = B_FALSE;
			cv_broadcast(&sl->sl_cv);
		}
	}
	mutex_exit(&sl->sl_mutex);
	return (rv);
}
750 
/*
 * smb_slist_obj_move
 *
 * Move an object from one list to the end of the other list.
 * Both lists must hold elements of the same type (same size and
 * link offset), and the caller must hold both list mutexes.
 */
void
smb_slist_obj_move(
    smb_slist_t	*dst,
    smb_slist_t	*src,
    void	*obj)
{
	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);

	list_remove(&src->sl_list, obj);
	list_insert_tail(&dst->sl_list, obj);
	dst->sl_count++;
	src->sl_count--;
	/* Source may have drained; wake any empty-waiters on it. */
	if ((src->sl_count == 0) && (src->sl_waiting)) {
		src->sl_waiting = B_FALSE;
		cv_broadcast(&src->sl_cv);
	}
}
775 
/*
 * smb_slist_wait_for_empty
 *
 * Block until the list is empty.  sl_waiting tells the remove/
 * move paths that someone needs a wakeup when the count hits 0.
 */
void
smb_slist_wait_for_empty(
    smb_slist_t	*sl)
{
	mutex_enter(&sl->sl_mutex);
	while (sl->sl_count) {
		sl->sl_waiting = B_TRUE;
		cv_wait(&sl->sl_cv, &sl->sl_mutex);
	}
	mutex_exit(&sl->sl_mutex);
}
792 
/*
 * smb_slist_exit
 *
 * Exit the mutex of the list, first signaling the condition
 * variable if the list is empty and a waiter is registered.
 */
void
smb_slist_exit(smb_slist_t *sl)
{
	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}
808 
809 /* smb_thread_... moved to smb_thread.c */
810 
811 /*
812  * smb_rwx_init
813  */
814 void
815 smb_rwx_init(
816     smb_rwx_t	*rwx)
817 {
818 	bzero(rwx, sizeof (smb_rwx_t));
819 	cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
820 	mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
821 	rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
822 }
823 
824 /*
825  * smb_rwx_destroy
826  */
827 void
828 smb_rwx_destroy(
829     smb_rwx_t	*rwx)
830 {
831 	mutex_destroy(&rwx->rwx_mutex);
832 	cv_destroy(&rwx->rwx_cv);
833 	rw_destroy(&rwx->rwx_lock);
834 }
835 
/*
 * smb_rwx_rwenter
 *
 * Acquire the embedded rwlock in the given mode.
 */
void
smb_rwx_rwenter(smb_rwx_t *rwx, krw_t mode)
{
	rw_enter(&rwx->rwx_lock, mode);
}
844 
/*
 * smb_rwx_rwexit
 *
 * Release the embedded rwlock.
 */
void
smb_rwx_rwexit(
    smb_rwx_t	*rwx)
{
	rw_exit(&rwx->rwx_lock);
}
854 
855 
/*
 * smb_rwx_cvwait
 *
 * Wait on rwx->rw_cv, dropping the rw lock and retake after wakeup.
 * Assumes the smb_rwx lock was entered in RW_READER or RW_WRITER
 * mode. It will:
 *
 *	1) release the lock and save its current mode.
 *	2) wait until the condition variable is signaled.
 *	3) re-acquire the lock in the mode saved in (1).
 *
 * timeout is in ticks; -1 means wait forever.
 * Returns the cv wait result: 1 after an untimed cv_wait(),
 * otherwise cv_reltimedwait()'s return (negative on timeout;
 * see condvar(9F)).
 *
 * Lock order: rwlock, mutex
 */
int
smb_rwx_cvwait(
    smb_rwx_t	*rwx,
    clock_t	timeout)
{
	krw_t	mode;
	int	rc = 1;

	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mode = RW_WRITER;
	} else {
		ASSERT(rw_read_held(&rwx->rwx_lock));
		mode = RW_READER;
	}

	/* Take the mutex before dropping the rwlock (lock order above). */
	mutex_enter(&rwx->rwx_mutex);
	rw_exit(&rwx->rwx_lock);

	rwx->rwx_waiting = B_TRUE;
	if (timeout == -1) {
		cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
	} else {
		rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
		    timeout, TR_CLOCK_TICK);
	}
	mutex_exit(&rwx->rwx_mutex);

	rw_enter(&rwx->rwx_lock, mode);
	return (rc);
}
900 
/*
 * smb_rwx_cvbcast
 *
 * Wake up threads waiting on rx_cv.
 * The rw lock may or may not be held.
 * The mutex MUST NOT be held (it is taken here).
 */
void
smb_rwx_cvbcast(smb_rwx_t *rwx)
{
	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		rwx->rwx_waiting = B_FALSE;
		cv_broadcast(&rwx->rwx_cv);
	}
	mutex_exit(&rwx->rwx_mutex);
}
918 
919 /* smb_idmap_... moved to smb_idmap.c */
920 
921 uint64_t
922 smb_time_unix_to_nt(timestruc_t *unix_time)
923 {
924 	uint64_t nt_time;
925 
926 	if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
927 		return (0);
928 
929 	nt_time = unix_time->tv_sec;
930 	nt_time *= 10000000;  /* seconds to 100ns */
931 	nt_time += unix_time->tv_nsec / 100;
932 	return (nt_time + NT_TIME_BIAS);
933 }
934 
/*
 * Convert NT time (100ns units since the NT epoch) to a unix
 * timestruc_t.  0 and -1 map to time zero.
 */
void
smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
{
	/*
	 * NOTE(review): seconds is uint32_t, so unix times past the
	 * 32-bit range are truncated — confirm that is acceptable here.
	 */
	uint32_t seconds;

	ASSERT(unix_time);

	if ((nt_time == 0) || (nt_time == -1)) {
		unix_time->tv_sec = 0;
		unix_time->tv_nsec = 0;
		return;
	}

	/*
	 * Can't represent times less than or equal NT_TIME_BIAS,
	 * so convert them to the oldest date we can store.
	 * Note that time zero is "special" being converted
	 * both directions as 0:0 (unix-to-nt, nt-to-unix).
	 */
	if (nt_time <= NT_TIME_BIAS) {
		unix_time->tv_sec = 0;
		unix_time->tv_nsec = 100;
		return;
	}

	nt_time -= NT_TIME_BIAS;
	seconds = nt_time / 10000000;
	unix_time->tv_sec = seconds;
	unix_time->tv_nsec = (nt_time  % 10000000) * 100;
}
965 
966 /*
967  * smb_time_gmt_to_local, smb_time_local_to_gmt
968  *
969  * Apply the gmt offset to convert between local time and gmt
970  */
971 int32_t
972 smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt)
973 {
974 	if ((gmt == 0) || (gmt == -1))
975 		return (0);
976 
977 	return (gmt - sr->sr_gmtoff);
978 }
979 
980 int32_t
981 smb_time_local_to_gmt(smb_request_t *sr, int32_t local)
982 {
983 	if ((local == 0) || (local == -1))
984 		return (0);
985 
986 	return (local + sr->sr_gmtoff);
987 }
988 
989 
990 /*
991  * smb_time_dos_to_unix
992  *
993  * Convert SMB_DATE & SMB_TIME values to a unix timestamp.
994  *
995  * A date/time field of 0 means that that server file system
996  * assigned value need not be changed. The behaviour when the
997  * date/time field is set to -1 is not documented but is
998  * generally treated like 0.
999  * If date or time is 0 or -1 the unix time is returned as 0
1000  * so that the caller can identify and handle this special case.
1001  */
1002 int32_t
1003 smb_time_dos_to_unix(int16_t date, int16_t time)
1004 {
1005 	struct tm	atm;
1006 
1007 	if (((date == 0) || (time == 0)) ||
1008 	    ((date == -1) || (time == -1))) {
1009 		return (0);
1010 	}
1011 
1012 	atm.tm_year = ((date >>  9) & 0x3F) + 80;
1013 	atm.tm_mon  = ((date >>  5) & 0x0F) - 1;
1014 	atm.tm_mday = ((date >>  0) & 0x1F);
1015 	atm.tm_hour = ((time >> 11) & 0x1F);
1016 	atm.tm_min  = ((time >>  5) & 0x3F);
1017 	atm.tm_sec  = ((time >>  0) & 0x1F) << 1;
1018 
1019 	return (smb_timegm(&atm));
1020 }
1021 
1022 void
1023 smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p)
1024 {
1025 	struct tm	atm;
1026 	int		i;
1027 	time_t		tmp_time;
1028 
1029 	if (ux_time == 0) {
1030 		*date_p = 0;
1031 		*time_p = 0;
1032 		return;
1033 	}
1034 
1035 	tmp_time = (time_t)ux_time;
1036 	(void) smb_gmtime_r(&tmp_time, &atm);
1037 
1038 	if (date_p) {
1039 		i = 0;
1040 		i += atm.tm_year - 80;
1041 		i <<= 4;
1042 		i += atm.tm_mon + 1;
1043 		i <<= 5;
1044 		i += atm.tm_mday;
1045 
1046 		*date_p = (short)i;
1047 	}
1048 	if (time_p) {
1049 		i = 0;
1050 		i += atm.tm_hour;
1051 		i <<= 6;
1052 		i += atm.tm_min;
1053 		i <<= 5;
1054 		i += atm.tm_sec >> 1;
1055 
1056 		*time_p = (short)i;
1057 	}
1058 }
1059 
1060 
/*
 * smb_gmtime_r
 *
 * Thread-safe version of smb_gmtime. Returns a null pointer if either
 * input parameter is a null pointer. Otherwise returns a pointer
 * to result.
 *
 * Day of the week calculation: the Epoch was a thursday.
 *
 * There are no timezone corrections so tm_isdst and tm_gmtoff are
 * always zero, and the zone is always WET.
 */
struct tm *
smb_gmtime_r(time_t *clock, struct tm *result)
{
	time_t tsec;
	int year;
	int month;
	int sec_per_month;

	if (clock == 0 || result == 0)
		return (0);

	bzero(result, sizeof (struct tm));
	tsec = *clock;
	tsec -= tzh_leapcnt;

	/* Weekday first, from whole days since the (Thursday) Epoch. */
	result->tm_wday = tsec / SECSPERDAY;
	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;

	/* Peel off whole years, leap-aware, leaving seconds-in-year. */
	year = EPOCH_YEAR;
	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
	    (SECSPERDAY * DAYSPERNYEAR))) {
		if (isleap(year))
			tsec -= SECSPERDAY * DAYSPERLYEAR;
		else
			tsec -= SECSPERDAY * DAYSPERNYEAR;

		++year;
	}

	result->tm_year = year - TM_YEAR_BASE;
	result->tm_yday = tsec / SECSPERDAY;

	/* Peel off whole months, adding a day to February in leap years. */
	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
		sec_per_month = days_in_month[month] * SECSPERDAY;

		if (month == TM_FEBRUARY && isleap(year))
			sec_per_month += SECSPERDAY;

		if (tsec < sec_per_month)
			break;

		tsec -= sec_per_month;
	}

	result->tm_mon = month;
	result->tm_mday = (tsec / SECSPERDAY) + 1;
	/* Remaining seconds within the day -> h:m:s. */
	tsec %= SECSPERDAY;
	result->tm_sec = tsec % 60;
	tsec /= 60;
	result->tm_min = tsec % 60;
	tsec /= 60;
	result->tm_hour = (int)tsec;

	return (result);
}
1128 
1129 
/*
 * smb_timegm
 *
 * Converts the broken-down time in tm to a time value, i.e. the number
 * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
 * not a POSIX or ANSI function. Per the man page, the input values of
 * tm_wday and tm_yday are ignored and, as the input data is assumed to
 * represent GMT, we force tm_isdst and tm_gmtoff to 0.
 *
 * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
 * and tm_yday, and bring the other fields within normal range. I don't
 * think this is really how it should be done but it's convenient for
 * now.
 *
 * Returns -1 if tm is a null pointer.
 */
time_t
smb_timegm(struct tm *tm)
{
	time_t tsec;
	int dd;
	int mm;
	int yy;
	int year;

	if (tm == 0)
		return (-1);

	year = tm->tm_year + TM_YEAR_BASE;
	tsec = tzh_leapcnt;

	/* Accumulate whole years since the Epoch, leap-aware. */
	for (yy = EPOCH_YEAR; yy < year; ++yy) {
		if (isleap(yy))
			tsec += SECSPERDAY * DAYSPERLYEAR;
		else
			tsec += SECSPERDAY * DAYSPERNYEAR;
	}

	/* Accumulate whole months in the target year. */
	for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
		dd = days_in_month[mm] * SECSPERDAY;

		if (mm == TM_FEBRUARY && isleap(year))
			dd += SECSPERDAY;

		tsec += dd;
	}

	tsec += (tm->tm_mday - 1) * SECSPERDAY;
	tsec += tm->tm_sec;
	tsec += tm->tm_min * SECSPERMIN;
	tsec += tm->tm_hour * SECSPERHOUR;

	tm->tm_isdst = 0;
	/* Normalize the caller's tm (fills tm_wday/tm_yday). */
	(void) smb_gmtime_r(&tsec, tm);
	return (tsec);
}
1184 
/*
 * smb_pad_align
 *
 * Return the number of bytes required to pad `offset` up to the
 * next multiple of `align` (0 when already aligned).
 */
uint32_t
smb_pad_align(uint32_t offset, uint32_t align)
{
	uint32_t rem = offset % align;

	return ((rem == 0) ? 0 : align - rem);
}
1201 
/*
 * smb_panic
 *
 * Logs the file name, function name and line number passed in and panics the
 * system.
 */
void
smb_panic(char *file, const char *func, int line)
{
	cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
}
1213 
/*
 * Creates an AVL tree and initializes the given smb_avl_t
 * structure using the passed args.  avl_sequence starts at a
 * random value and is bumped on every mutation.
 *
 * NOTE(review): avl_cv is waited on in smb_avl_destroy() but no
 * cv_init() appears here — confirm it is initialized elsewhere
 * (or implicitly usable when zeroed).
 */
void
smb_avl_create(smb_avl_t *avl, size_t size, size_t offset,
	const smb_avl_nops_t *ops)
{
	ASSERT(avl);
	ASSERT(ops);

	rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL);

	avl->avl_nops = ops;
	avl->avl_state = SMB_AVL_STATE_READY;
	avl->avl_refcnt = 0;
	(void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence,
	    sizeof (uint32_t));

	avl_create(&avl->avl_tree, ops->avln_cmp, size, offset);
}
1236 
/*
 * Destroys the specified AVL tree.
 * It waits for all the in-flight operations to finish
 * before destroying the AVL.  A second call (or a call on an
 * AVL not in READY state) is a no-op.
 */
void
smb_avl_destroy(smb_avl_t *avl)
{
	void *cookie = NULL;
	void *node;

	ASSERT(avl);

	mutex_enter(&avl->avl_mutex);
	if (avl->avl_state != SMB_AVL_STATE_READY) {
		mutex_exit(&avl->avl_mutex);
		return;
	}

	avl->avl_state = SMB_AVL_STATE_DESTROYING;

	/* Wait for in-flight holders (smb_avl_hold/rele) to drain. */
	while (avl->avl_refcnt > 0)
		(void) cv_wait(&avl->avl_cv, &avl->avl_mutex);
	mutex_exit(&avl->avl_mutex);

	rw_enter(&avl->avl_lock, RW_WRITER);
	/* Destroy every node through the registered destructor. */
	while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL)
		avl->avl_nops->avln_destroy(node);

	avl_destroy(&avl->avl_tree);
	rw_exit(&avl->avl_lock);

	rw_destroy(&avl->avl_lock);

	mutex_destroy(&avl->avl_mutex);
	bzero(avl, sizeof (smb_avl_t));
}
1274 
1275 /*
1276  * Adds the given item to the AVL if it's
1277  * not already there.
1278  *
1279  * Returns:
1280  *
1281  *	ENOTACTIVE	AVL is not in READY state
1282  *	EEXIST		The item is already in AVL
1283  */
1284 int
1285 smb_avl_add(smb_avl_t *avl, void *item)
1286 {
1287 	avl_index_t where;
1288 
1289 	ASSERT(avl);
1290 	ASSERT(item);
1291 
1292 	if (!smb_avl_hold(avl))
1293 		return (ENOTACTIVE);
1294 
1295 	rw_enter(&avl->avl_lock, RW_WRITER);
1296 	if (avl_find(&avl->avl_tree, item, &where) != NULL) {
1297 		rw_exit(&avl->avl_lock);
1298 		smb_avl_rele(avl);
1299 		return (EEXIST);
1300 	}
1301 
1302 	avl_insert(&avl->avl_tree, item, where);
1303 	avl->avl_sequence++;
1304 	rw_exit(&avl->avl_lock);
1305 
1306 	smb_avl_rele(avl);
1307 	return (0);
1308 }
1309 
1310 /*
1311  * Removes the given item from the AVL.
1312  * If no reference is left on the item
1313  * it will also be destroyed by calling the
1314  * registered destroy operation.
1315  */
1316 void
1317 smb_avl_remove(smb_avl_t *avl, void *item)
1318 {
1319 	avl_index_t where;
1320 	void *rm_item;
1321 
1322 	ASSERT(avl);
1323 	ASSERT(item);
1324 
1325 	if (!smb_avl_hold(avl))
1326 		return;
1327 
1328 	rw_enter(&avl->avl_lock, RW_WRITER);
1329 	if ((rm_item = avl_find(&avl->avl_tree, item, &where)) == NULL) {
1330 		rw_exit(&avl->avl_lock);
1331 		smb_avl_rele(avl);
1332 		return;
1333 	}
1334 
1335 	avl_remove(&avl->avl_tree, rm_item);
1336 	if (avl->avl_nops->avln_rele(rm_item))
1337 		avl->avl_nops->avln_destroy(rm_item);
1338 	avl->avl_sequence++;
1339 	rw_exit(&avl->avl_lock);
1340 
1341 	smb_avl_rele(avl);
1342 }
1343 
1344 /*
1345  * Looks up the AVL for the given item.
1346  * If the item is found a hold on the object
1347  * is taken before the pointer to it is
1348  * returned to the caller. The caller MUST
1349  * always call smb_avl_release() after it's done
1350  * using the returned object to release the hold
1351  * taken on the object.
1352  */
1353 void *
1354 smb_avl_lookup(smb_avl_t *avl, void *item)
1355 {
1356 	void *node = NULL;
1357 
1358 	ASSERT(avl);
1359 	ASSERT(item);
1360 
1361 	if (!smb_avl_hold(avl))
1362 		return (NULL);
1363 
1364 	rw_enter(&avl->avl_lock, RW_READER);
1365 	node = avl_find(&avl->avl_tree, item, NULL);
1366 	if (node != NULL)
1367 		avl->avl_nops->avln_hold(node);
1368 	rw_exit(&avl->avl_lock);
1369 
1370 	if (node == NULL)
1371 		smb_avl_rele(avl);
1372 
1373 	return (node);
1374 }
1375 
/*
 * The hold on the given object is released.
 * This function MUST always be called after
 * smb_avl_lookup() and smb_avl_iterate() for
 * the returned object.
 *
 * If the node's reference drops to zero (avln_rele returns true)
 * the node is destroyed.  The hold on the AVL itself (taken by
 * lookup/iterate) is then dropped; if the AVL is in DESTROYING
 * state, the destroying thread is notified.
 */
void
smb_avl_release(smb_avl_t *avl, void *item)
{
	ASSERT(avl);
	ASSERT(item);

	if (avl->avl_nops->avln_rele(item))
		avl->avl_nops->avln_destroy(item);

	smb_avl_rele(avl);
}
1396 
/*
 * Initializes the given cursor for the AVL.
 * The cursor will be used to iterate through the AVL.
 *
 * The cursor captures the tree's current sequence number;
 * smb_avl_iterate() aborts the walk if the tree has been
 * modified (sequence changed) since this snapshot.
 */
void
smb_avl_iterinit(smb_avl_t *avl, smb_avl_cursor_t *cursor)
{
	ASSERT(avl);
	ASSERT(cursor);

	cursor->avlc_next = NULL;
	cursor->avlc_sequence = avl->avl_sequence;
}
1410 
/*
 * Iterates through the AVL using the given cursor.
 * It always starts at the beginning and then returns
 * a pointer to the next object on each subsequent call.
 *
 * If a new object is added to or removed from the AVL
 * between two calls to this function, the iteration
 * will terminate prematurely.
 *
 * The caller MUST always call smb_avl_release() after it's
 * done using the returned object to release the hold taken
 * on the object.
 */
void *
smb_avl_iterate(smb_avl_t *avl, smb_avl_cursor_t *cursor)
{
	void *node;

	ASSERT(avl);
	ASSERT(cursor);

	if (!smb_avl_hold(avl))
		return (NULL);

	rw_enter(&avl->avl_lock, RW_READER);
	/* Tree changed since iterinit/last iterate; stop the walk. */
	if (cursor->avlc_sequence != avl->avl_sequence) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return (NULL);
	}

	/* A NULL cursor position means the walk hasn't started yet. */
	if (cursor->avlc_next == NULL)
		node = avl_first(&avl->avl_tree);
	else
		node = AVL_NEXT(&avl->avl_tree, cursor->avlc_next);

	/* Hold the node for the caller; released via smb_avl_release(). */
	if (node != NULL)
		avl->avl_nops->avln_hold(node);

	cursor->avlc_next = node;
	rw_exit(&avl->avl_lock);

	/* End of tree: nothing for the caller to release, drop our hold. */
	if (node == NULL)
		smb_avl_rele(avl);

	return (node);
}
1458 
1459 /*
1460  * Increments the AVL reference count in order to
1461  * prevent the avl from being destroyed while it's
1462  * being accessed.
1463  */
1464 static boolean_t
1465 smb_avl_hold(smb_avl_t *avl)
1466 {
1467 	mutex_enter(&avl->avl_mutex);
1468 	if (avl->avl_state != SMB_AVL_STATE_READY) {
1469 		mutex_exit(&avl->avl_mutex);
1470 		return (B_FALSE);
1471 	}
1472 	avl->avl_refcnt++;
1473 	mutex_exit(&avl->avl_mutex);
1474 
1475 	return (B_TRUE);
1476 }
1477 
/*
 * Decrements the AVL reference count to release the
 * hold. If another thread is trying to destroy the
 * AVL and is waiting for the reference count to become
 * 0, it is signaled to wake up.
 */
static void
smb_avl_rele(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	ASSERT(avl->avl_refcnt > 0);
	avl->avl_refcnt--;
	/* smb_avl_destroy() waits on avl_cv until the refcnt drains. */
	if (avl->avl_state == SMB_AVL_STATE_DESTROYING)
		cv_broadcast(&avl->avl_cv);
	mutex_exit(&avl->avl_mutex);
}
1494 
/*
 * smb_latency_init
 *
 * Zeroes the latency statistics and initializes a spin mutex at
 * ipltospl(SPL7) protecting them (same setup as smb_srqueue_init).
 */
void
smb_latency_init(smb_latency_t *lat)
{
	bzero(lat, sizeof (*lat));
	mutex_init(&lat->ly_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
}
1504 
/*
 * smb_latency_destroy
 *
 * Destroys the mutex set up by smb_latency_init().
 */
void
smb_latency_destroy(smb_latency_t *lat)
{
	mutex_destroy(&lat->ly_mutex);
}
1513 
1514 /*
1515  * smb_latency_add_sample
1516  *
1517  * Uses the new sample to calculate the new mean and standard deviation. The
1518  * sample must be a scaled value.
1519  */
1520 void
1521 smb_latency_add_sample(smb_latency_t *lat, hrtime_t sample)
1522 {
1523 	hrtime_t	a_mean;
1524 	hrtime_t	d_mean;
1525 
1526 	mutex_enter(&lat->ly_mutex);
1527 	lat->ly_a_nreq++;
1528 	lat->ly_a_sum += sample;
1529 	if (lat->ly_a_nreq != 0) {
1530 		a_mean = lat->ly_a_sum / lat->ly_a_nreq;
1531 		lat->ly_a_stddev =
1532 		    (sample - a_mean) * (sample - lat->ly_a_mean);
1533 		lat->ly_a_mean = a_mean;
1534 	}
1535 	lat->ly_d_nreq++;
1536 	lat->ly_d_sum += sample;
1537 	if (lat->ly_d_nreq != 0) {
1538 		d_mean = lat->ly_d_sum / lat->ly_d_nreq;
1539 		lat->ly_d_stddev =
1540 		    (sample - d_mean) * (sample - lat->ly_d_mean);
1541 		lat->ly_d_mean = d_mean;
1542 	}
1543 	mutex_exit(&lat->ly_mutex);
1544 }
1545 
/*
 * smb_srqueue_init
 *
 * Zeroes the counters, initializes a spin mutex at ipltospl(SPL7),
 * and stamps both the wait-queue and run-queue "last update" times
 * with the current unscaled hrtime.
 */
void
smb_srqueue_init(smb_srqueue_t *srq)
{
	bzero(srq, sizeof (*srq));
	mutex_init(&srq->srq_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
	srq->srq_wlastupdate = srq->srq_rlastupdate = gethrtime_unscaled();
}
1556 
/*
 * smb_srqueue_destroy
 *
 * Destroys the mutex set up by smb_srqueue_init().
 */
void
smb_srqueue_destroy(smb_srqueue_t *srq)
{
	mutex_destroy(&srq->srq_mutex);
}
1565 
/*
 * smb_srqueue_waitq_enter
 *
 * A request entered the wait queue.  Close out the interval since the
 * last wait-queue update by accumulating the time-weighted queue
 * length (srq_wlentime += delta * wcnt) and the busy time (srq_wtime)
 * for the period during which wcnt requests were waiting, then bump
 * the waiter count.  This is kstat-style wait-queue accounting.
 */
void
smb_srqueue_waitq_enter(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	wcnt = srq->srq_wcnt++;	/* wcnt holds the pre-increment count */
	if (wcnt != 0) {
		srq->srq_wlentime += delta * wcnt;
		srq->srq_wtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}
1587 
/*
 * smb_srqueue_runq_exit
 *
 * A request left the run queue.  Accumulate the time-weighted
 * run-queue length and busy time for the interval since the last
 * run-queue update, using the count that still includes the
 * departing request, then decrement the runner count.
 */
void
smb_srqueue_runq_exit(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	rcnt = srq->srq_rcnt--;	/* rcnt holds the pre-decrement count */
	ASSERT(rcnt > 0);
	srq->srq_rlentime += delta * rcnt;
	srq->srq_rtime += delta;
	mutex_exit(&srq->srq_mutex);
}
1608 
/*
 * smb_srqueue_waitq_to_runq
 *
 * A request moved from the wait queue to the run queue.  Under a
 * single lock and a single timestamp, close out the wait-queue
 * interval (decrementing the waiter count) and the run-queue
 * interval (incrementing the runner count), accumulating the
 * time-weighted lengths and busy times for both.
 */
void
smb_srqueue_waitq_to_runq(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	wcnt = srq->srq_wcnt--;	/* pre-decrement waiter count */
	ASSERT(wcnt > 0);
	srq->srq_wlentime += delta * wcnt;
	srq->srq_wtime += delta;
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	rcnt = srq->srq_rcnt++;	/* pre-increment runner count */
	if (rcnt != 0) {
		srq->srq_rlentime += delta * rcnt;
		srq->srq_rtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}
1637 
/*
 * smb_srqueue_update
 *
 * Takes a snapshot of the srqueue statistics into *kd.  Both queues
 * are first brought up to date as of "snaptime", then the cumulative
 * times are copied out.  scalehrtime() is applied to the copies after
 * the spin mutex has been dropped, converting unscaled hrtime values
 * to nanoseconds.
 */
void
smb_srqueue_update(smb_srqueue_t *srq, smb_kstat_utilization_t *kd)
{
	hrtime_t	delta;
	hrtime_t	snaptime;

	mutex_enter(&srq->srq_mutex);
	snaptime = gethrtime_unscaled();
	delta = snaptime - srq->srq_wlastupdate;
	srq->srq_wlastupdate = snaptime;
	if (srq->srq_wcnt != 0) {
		srq->srq_wlentime += delta * srq->srq_wcnt;
		srq->srq_wtime += delta;
	}
	delta = snaptime - srq->srq_rlastupdate;
	srq->srq_rlastupdate = snaptime;
	if (srq->srq_rcnt != 0) {
		srq->srq_rlentime += delta * srq->srq_rcnt;
		srq->srq_rtime += delta;
	}
	kd->ku_rlentime = srq->srq_rlentime;
	kd->ku_rtime = srq->srq_rtime;
	kd->ku_wlentime = srq->srq_wlentime;
	kd->ku_wtime = srq->srq_wtime;
	mutex_exit(&srq->srq_mutex);
	scalehrtime(&kd->ku_rlentime);
	scalehrtime(&kd->ku_rtime);
	scalehrtime(&kd->ku_wlentime);
	scalehrtime(&kd->ku_wtime);
}
1673 
/*
 * smb_threshold_init
 *
 * Initializes a command threshold: at most "threshold" callers may
 * hold a slot (see smb_threshold_enter) at any time; others block
 * for up to "timeout" milliseconds.  cmd names the limited resource;
 * the pointer is stored, not copied, so it must outlive the
 * threshold.
 */
void
smb_threshold_init(smb_cmd_threshold_t *ct, char *cmd,
    uint_t threshold, uint_t timeout)
{
	bzero(ct, sizeof (smb_cmd_threshold_t));
	mutex_init(&ct->ct_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ct->ct_cond, NULL, CV_DEFAULT, NULL);

	ct->ct_cmd = cmd;
	ct->ct_threshold = threshold;
	ct->ct_timeout = timeout;
}
1686 
/*
 * smb_threshold_fini
 *
 * Destroys the condvar and mutex set up by smb_threshold_init().
 * Assumes no thread is still blocked in smb_threshold_enter()
 * (see smb_threshold_wake_all) -- confirm at call sites.
 */
void
smb_threshold_fini(smb_cmd_threshold_t *ct)
{
	cv_destroy(&ct->ct_cond);
	mutex_destroy(&ct->ct_mutex);
}
1693 
/*
 * This threshold mechanism is used to limit the number of simultaneous
 * named pipe connections, concurrent authentication conversations, etc.
 * Requests that would take us over the threshold wait until either the
 * resources are available (return zero) or timeout (return error).
 *
 * Returns:
 *	0		a slot was acquired; the caller must later call
 *			smb_threshold_exit()
 *	ETIME		timed out waiting for a slot
 *	ECANCELED	the threshold is zero, i.e. it was shut down via
 *			smb_threshold_wake_all()
 */
int
smb_threshold_enter(smb_cmd_threshold_t *ct)
{
	clock_t	time, rem;

	/* Absolute deadline: total wait is bounded by ct_timeout msec. */
	time = MSEC_TO_TICK(ct->ct_timeout) + ddi_get_lbolt();
	mutex_enter(&ct->ct_mutex);

	while (ct->ct_threshold != 0 &&
	    ct->ct_threshold <= ct->ct_active_cnt) {
		ct->ct_blocked_cnt++;
		rem = cv_timedwait(&ct->ct_cond, &ct->ct_mutex, time);
		ct->ct_blocked_cnt--;
		if (rem < 0) {
			mutex_exit(&ct->ct_mutex);
			return (ETIME);
		}
	}
	/* A zero threshold means this threshold has been cancelled. */
	if (ct->ct_threshold == 0) {
		mutex_exit(&ct->ct_mutex);
		return (ECANCELED);
	}

	ASSERT3U(ct->ct_active_cnt, <, ct->ct_threshold);
	ct->ct_active_cnt++;

	mutex_exit(&ct->ct_mutex);
	return (0);
}
1729 
/*
 * smb_threshold_exit
 *
 * Releases a slot acquired by smb_threshold_enter() and wakes one
 * blocked waiter, if any.
 */
void
smb_threshold_exit(smb_cmd_threshold_t *ct)
{
	mutex_enter(&ct->ct_mutex);
	ASSERT3U(ct->ct_active_cnt, >, 0);
	ct->ct_active_cnt--;
	if (ct->ct_blocked_cnt)
		cv_signal(&ct->ct_cond);
	mutex_exit(&ct->ct_mutex);
}
1740 
/*
 * smb_threshold_wake_all
 *
 * Cancels the threshold (sets it to zero) and wakes every blocked
 * waiter; those smb_threshold_enter() callers then return ECANCELED.
 */
void
smb_threshold_wake_all(smb_cmd_threshold_t *ct)
{
	mutex_enter(&ct->ct_mutex);
	ct->ct_threshold = 0;
	cv_broadcast(&ct->ct_cond);
	mutex_exit(&ct->ct_mutex);
}
1749 
1750 /* taken from mod_hash_byptr */
1751 uint_t
1752 smb_hash_uint64(smb_hash_t *hash, uint64_t val)
1753 {
1754 	uint64_t k = val >> hash->rshift;
1755 	uint_t idx = ((uint_t)k) & (hash->num_buckets - 1);
1756 
1757 	return (idx);
1758 }
1759 
1760 boolean_t
1761 smb_is_pow2(size_t n)
1762 {
1763 	return ((n & (n - 1)) == 0);
1764 }
1765 
/*
 * Create a hash table with num_buckets buckets, each an smb_llist
 * of elements of size elemsz linked through link_offset.
 *
 * num_buckets is rounded up to a power of two so smb_hash_uint64()
 * can reduce a key with a simple mask.  rshift is set from the
 * element size so pointer-derived keys shed their low-order
 * always-zero bits (see the mod_hash_byptr note above
 * smb_hash_uint64()).
 *
 * NOTE(review): callers are expected to pass num_buckets > 0;
 * confirm -- zero is not handled specially here.
 */
smb_hash_t *
smb_hash_create(size_t elemsz, size_t link_offset,
    uint32_t num_buckets)
{
	smb_hash_t *hash = kmem_alloc(sizeof (*hash), KM_SLEEP);
	int i;

	if (!smb_is_pow2(num_buckets))
		num_buckets = 1 << highbit(num_buckets);

	hash->rshift = highbit(elemsz);
	hash->num_buckets = num_buckets;
	hash->buckets = kmem_zalloc(num_buckets * sizeof (smb_bucket_t),
	    KM_SLEEP);
	for (i = 0; i < num_buckets; i++)
		smb_llist_constructor(&hash->buckets[i].b_list, elemsz,
		    link_offset);
	return (hash);
}
1785 
/*
 * Tear down a hash table created by smb_hash_create(): destroy each
 * bucket list, then free the bucket array and the table itself.
 * Presumably the buckets must already be empty -- confirm against
 * smb_llist_destructor() requirements.
 */
void
smb_hash_destroy(smb_hash_t *hash)
{
	int i;

	for (i = 0; i < hash->num_buckets; i++)
		smb_llist_destructor(&hash->buckets[i].b_list);

	kmem_free(hash->buckets, hash->num_buckets * sizeof (smb_bucket_t));
	kmem_free(hash, sizeof (*hash));
}
1797