/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/tzfile.h>
#include <sys/atomic.h>
#include <sys/time.h>
#include <sys/spl.h>
#include <sys/random.h>
#include <smbsrv/smb_kproto.h>
#include <smbsrv/smb_fsops.h>
#include <smbsrv/smbinfo.h>
#include <smbsrv/smb_xdr.h>
#include <smbsrv/smb_vops.h>
#include <smbsrv/smb_idmap.h>

#include <sys/sid.h>
#include <sys/priv_names.h>

static kmem_cache_t	*smb_dtor_cache = NULL;

static boolean_t smb_avl_hold(smb_avl_t *);
static void smb_avl_rele(smb_avl_t *);

time_t tzh_leapcnt = 0;

struct tm
*smb_gmtime_r(time_t *clock, struct tm *result);

time_t
smb_timegm(struct tm *tm);

struct	tm {
	int	tm_sec;
	int	tm_min;
	int	tm_hour;
	int	tm_mday;
	int	tm_mon;
	int	tm_year;
	int	tm_wday;
	int	tm_yday;
	int	tm_isdst;
};

static const int days_in_month[] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

int
smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (smb_wcequiv_strlen(str));
	return (strlen(str));
}

int
smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (smb_wcequiv_strlen(str) + 2);
	return (strlen(str) + 1);
}

int
smb_ascii_or_unicode_null_len(struct smb_request *sr)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (2);
	return (1);
}

/*
 * Convert old-style (DOS, LanMan) wildcard strings to NT style.
 * This should ONLY happen to patterns that come from old clients,
 * meaning dialect LANMAN2_1 etc. (dialect < NT_LM_0_12).
 *
 *	? is converted to >
 *	* is converted to < if it is followed by .
 *	. is converted to " if it is followed by ? or * or end of pattern
 *
 * Note: modifies pattern in place.
 */
void
smb_convert_wildcards(char *pattern)
{
	char	*p;

	for (p = pattern; *p != '\0'; p++) {
		switch (*p) {
		case '?':
			*p = '>';
			break;
		case '*':
			if (p[1] == '.')
				*p = '<';
			break;
		case '.':
			if (p[1] == '?' || p[1] == '*' || p[1] == '\0')
				*p = '\"';
			break;
		}
	}
}
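
/*
 * Illustrative example (not part of the original source): under the
 * rules above, the old-style pattern "*.tx?" is rewritten in place to
 * "<.tx>", and a trailing dot converts to '"', so "abc." becomes
 * "abc\"".
 */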

/*
 * smb_sattr_check
 *
 * Check file attributes against a search attribute (sattr) mask.
 *
 * Normal files, which includes READONLY and ARCHIVE, always pass
 * this check.  If the DIRECTORY, HIDDEN or SYSTEM special attributes
 * are set then they must appear in the search mask.  The special
 * attributes are inclusive, i.e. all special attributes that appear
 * in sattr must also appear in the file attributes for the check to
 * pass.
 *
 * The following examples show how this works:
 *
 *		fileA:	READONLY
 *		fileB:	0 (no attributes = normal file)
 *		fileC:	READONLY, ARCHIVE
 *		fileD:	HIDDEN
 *		fileE:	READONLY, HIDDEN, SYSTEM
 *		dirA:	DIRECTORY
 *
 * search attribute: 0
 *		Returns: fileA, fileB and fileC.
 * search attribute: HIDDEN
 *		Returns: fileA, fileB, fileC and fileD.
 * search attribute: SYSTEM
 *		Returns: fileA, fileB and fileC.
 * search attribute: DIRECTORY
 *		Returns: fileA, fileB, fileC and dirA.
 * search attribute: HIDDEN and SYSTEM
 *		Returns: fileA, fileB, fileC, fileD and fileE.
 *
 * Returns true if the file and sattr match; otherwise, returns false.
 */
boolean_t
smb_sattr_check(uint16_t dosattr, uint16_t sattr)
{
	if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
	    !(sattr & FILE_ATTRIBUTE_DIRECTORY))
		return (B_FALSE);

	if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
	    !(sattr & FILE_ATTRIBUTE_HIDDEN))
		return (B_FALSE);

	if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
	    !(sattr & FILE_ATTRIBUTE_SYSTEM))
		return (B_FALSE);

	return (B_TRUE);
}

int
microtime(timestruc_t *tvp)
{
	tvp->tv_sec = gethrestime_sec();
	tvp->tv_nsec = 0;
	return (0);
}

int32_t
clock_get_milli_uptime()
{
	return (TICK_TO_MSEC(ddi_get_lbolt()));
}

/*
 * smb_idpool_increment
 *
 * This function increments the ID pool by doubling the current size. This
 * function assumes the caller entered the mutex of the pool.
 */
static int
smb_idpool_increment(
    smb_idpool_t	*pool)
{
	uint8_t		*new_pool;
	uint32_t	new_size;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	new_size = pool->id_size * 2;
	if (new_size <= SMB_IDPOOL_MAX_SIZE) {
		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
		if (new_pool) {
			bzero(new_pool, new_size / 8);
			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
			kmem_free(pool->id_pool, pool->id_size / 8);
			pool->id_pool = new_pool;
			pool->id_free_counter += new_size - pool->id_size;
			pool->id_max_free_counter += new_size - pool->id_size;
			pool->id_size = new_size;
			pool->id_idx_msk = (new_size / 8) - 1;
			if (new_size >= SMB_IDPOOL_MAX_SIZE) {
				/* id -1 made unavailable */
				pool->id_pool[pool->id_idx_msk] = 0x80;
				pool->id_free_counter--;
				pool->id_max_free_counter--;
			}
			return (0);
		}
	}
	return (-1);
}

/*
 * smb_idpool_constructor
 *
 * This function initializes the pool structure provided.
 */
int
smb_idpool_constructor(
    smb_idpool_t	*pool)
{

	ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);

	pool->id_size = SMB_IDPOOL_MIN_SIZE;
	pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
	pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_bit = 0x02;
	pool->id_bit_idx = 1;
	pool->id_idx = 0;
	pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
	    KM_SLEEP);
	bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
	pool->id_magic = SMB_IDPOOL_MAGIC;
	return (0);
}

/*
 * smb_idpool_destructor
 *
 * This function tears down and frees the resources associated with the
 * pool provided.
 */
void
smb_idpool_destructor(
    smb_idpool_t	*pool)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
	pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
	mutex_destroy(&pool->id_mutex);
	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
}

/*
 * smb_idpool_alloc
 *
 * This function allocates an ID from the pool provided.
 */
int
smb_idpool_alloc(
    smb_idpool_t	*pool,
    uint16_t		*id)
{
	uint32_t	i;
	uint8_t		bit;
	uint8_t		bit_idx;
	uint8_t		byte;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	mutex_enter(&pool->id_mutex);
	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
		mutex_exit(&pool->id_mutex);
		return (-1);
	}

	i = pool->id_size;
	while (i) {
		bit = pool->id_bit;
		bit_idx = pool->id_bit_idx;
		byte = pool->id_pool[pool->id_idx];
		while (bit) {
			if (byte & bit) {
				bit = bit << 1;
				bit_idx++;
				continue;
			}
			pool->id_pool[pool->id_idx] |= bit;
			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
			pool->id_free_counter--;
			pool->id_bit = bit;
			pool->id_bit_idx = bit_idx;
			mutex_exit(&pool->id_mutex);
			return (0);
		}
		pool->id_bit = 1;
		pool->id_bit_idx = 0;
		pool->id_idx++;
		pool->id_idx &= pool->id_idx_msk;
		--i;
	}
	/*
	 * This section of code shouldn't be reached. If there are IDs
	 * available and none could be found there's a problem.
	 */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
	return (-1);
}

/*
 * smb_idpool_free
 *
 * This function frees the ID provided.
 */
void
smb_idpool_free(
    smb_idpool_t	*pool,
    uint16_t		id)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(id != 0);
	ASSERT(id != 0xFFFF);

	mutex_enter(&pool->id_mutex);
	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
		pool->id_free_counter++;
		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
		mutex_exit(&pool->id_mutex);
		return;
	}
	/* Freeing a free ID. */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
}
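
/*
 * Illustrative usage sketch for the ID pool (hypothetical caller, not
 * part of the original source):
 *
 *	smb_idpool_t	pool;
 *	uint16_t	id;
 *
 *	(void) smb_idpool_constructor(&pool);
 *	if (smb_idpool_alloc(&pool, &id) == 0) {
 *		... use id (never 0 or 0xFFFF) ...
 *		smb_idpool_free(&pool, id);
 *	}
 *	smb_idpool_destructor(&pool);
 */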

/*
 * Initialize the llist delete queue object cache.
 */
void
smb_llist_init(void)
{
	if (smb_dtor_cache != NULL)
		return;

	smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
	    sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
}

/*
 * Destroy the llist delete queue object cache.
 */
void
smb_llist_fini(void)
{
	if (smb_dtor_cache != NULL) {
		kmem_cache_destroy(smb_dtor_cache);
		smb_dtor_cache = NULL;
	}
}

/*
 * smb_llist_constructor
 *
 * This function initializes a locked list.
 */
void
smb_llist_constructor(
    smb_llist_t	*ll,
    size_t	size,
    size_t	offset)
{
	rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
	list_create(&ll->ll_list, size, offset);
	list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
	    offsetof(smb_dtor_t, dt_lnd));
	ll->ll_count = 0;
	ll->ll_wrop = 0;
	ll->ll_deleteq_count = 0;
	ll->ll_flushing = B_FALSE;
}

/*
 * Flush the delete queue and destroy a locked list.
 */
void
smb_llist_destructor(
    smb_llist_t	*ll)
{
	smb_llist_flush(ll);

	ASSERT(ll->ll_count == 0);
	ASSERT(ll->ll_deleteq_count == 0);

	rw_destroy(&ll->ll_lock);
	list_destroy(&ll->ll_list);
	list_destroy(&ll->ll_deleteq);
	mutex_destroy(&ll->ll_mutex);
}

/*
 * Post an object to the delete queue.  The delete queue will be processed
 * during list exit or list destruction.  Objects are often posted for
 * deletion during list iteration (while the list is locked) but that is
 * not required, and an object can be posted at any time.
 */
void
smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
{
	smb_dtor_t	*dtor;

	ASSERT((object != NULL) && (dtorproc != NULL));

	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
	bzero(dtor, sizeof (smb_dtor_t));
	dtor->dt_magic = SMB_DTOR_MAGIC;
	dtor->dt_object = object;
	dtor->dt_proc = dtorproc;

	mutex_enter(&ll->ll_mutex);
	list_insert_tail(&ll->ll_deleteq, dtor);
	++ll->ll_deleteq_count;
	mutex_exit(&ll->ll_mutex);
}

/*
 * Exit the list lock and process the delete queue.
 */
void
smb_llist_exit(smb_llist_t *ll)
{
	rw_exit(&ll->ll_lock);
	smb_llist_flush(ll);
}
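
/*
 * Illustrative iteration sketch (hypothetical caller; foo_t,
 * foo_is_dead() and foo_destroy() are invented for the example, and
 * smb_llist_enter(), smb_llist_head() and smb_llist_next() are assumed
 * to be the usual wrappers from the smbsrv headers):
 *
 *	foo_t	*f;
 *
 *	smb_llist_enter(ll, RW_READER);
 *	for (f = smb_llist_head(ll); f != NULL;
 *	    f = smb_llist_next(ll, f)) {
 *		if (foo_is_dead(f))
 *			smb_llist_post(ll, f, foo_destroy);
 *	}
 *	smb_llist_exit(ll);	(drops the lock, then flushes deletions)
 */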

/*
 * Flush the list delete queue.  The mutex is dropped across the destructor
 * call in case this leads to additional objects being posted to the delete
 * queue.
 */
void
smb_llist_flush(smb_llist_t *ll)
{
	smb_dtor_t    *dtor;

	mutex_enter(&ll->ll_mutex);
	if (ll->ll_flushing) {
		mutex_exit(&ll->ll_mutex);
		return;
	}
	ll->ll_flushing = B_TRUE;

	dtor = list_head(&ll->ll_deleteq);
	while (dtor != NULL) {
		SMB_DTOR_VALID(dtor);
		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
		list_remove(&ll->ll_deleteq, dtor);
		--ll->ll_deleteq_count;
		mutex_exit(&ll->ll_mutex);

		dtor->dt_proc(dtor->dt_object);

		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
		kmem_cache_free(smb_dtor_cache, dtor);
		mutex_enter(&ll->ll_mutex);
		dtor = list_head(&ll->ll_deleteq);
	}
	ll->ll_flushing = B_FALSE;

	mutex_exit(&ll->ll_mutex);
}

/*
 * smb_llist_upgrade
 *
 * This function tries to upgrade the lock of the locked list. It assumes
 * the lock has already been entered in RW_READER mode. It first tries the
 * Solaris function rw_tryupgrade(). If that call fails the lock is released
 * and reentered in RW_WRITER mode. In that last case a window is opened
 * during which the contents of the list may have changed. The return code
 * indicates whether or not the list was modified while the lock was
 * dropped.
 */
int
smb_llist_upgrade(
    smb_llist_t *ll)
{
	uint64_t	wrop;

	if (rw_tryupgrade(&ll->ll_lock) != 0) {
		return (0);
	}
	wrop = ll->ll_wrop;
	rw_exit(&ll->ll_lock);
	rw_enter(&ll->ll_lock, RW_WRITER);
	return (wrop != ll->ll_wrop);
}
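
/*
 * Typical caller pattern (illustrative; find_candidate() is a
 * hypothetical helper): any state gathered under the reader lock must
 * be re-validated whenever smb_llist_upgrade() returns non-zero.
 *
 *	smb_llist_enter(ll, RW_READER);
 *	obj = find_candidate(ll);
 *	if (smb_llist_upgrade(ll) != 0)
 *		obj = find_candidate(ll);	(list changed; look again)
 *	if (obj != NULL)
 *		smb_llist_remove(ll, obj);
 *	smb_llist_exit(ll);
 */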

/*
 * smb_llist_insert_head
 *
 * This function inserts the object passed at the beginning of the list.
 * This function assumes the lock of the list has already been entered.
 */
void
smb_llist_insert_head(
    smb_llist_t	*ll,
    void	*obj)
{
	list_insert_head(&ll->ll_list, obj);
	++ll->ll_wrop;
	++ll->ll_count;
}

/*
 * smb_llist_insert_tail
 *
 * This function appends the object passed to the list. This function
 * assumes the lock of the list has already been entered.
 */
void
smb_llist_insert_tail(
    smb_llist_t	*ll,
    void	*obj)
{
	list_insert_tail(&ll->ll_list, obj);
	++ll->ll_wrop;
	++ll->ll_count;
}

/*
 * smb_llist_remove
 *
 * This function removes the object passed from the list. This function
 * assumes the lock of the list has already been entered.
 */
void
smb_llist_remove(
    smb_llist_t	*ll,
    void	*obj)
{
	list_remove(&ll->ll_list, obj);
	++ll->ll_wrop;
	--ll->ll_count;
}

/*
 * smb_llist_get_count
 *
 * This function returns the number of elements in the specified list.
 */
uint32_t
smb_llist_get_count(
    smb_llist_t *ll)
{
	return (ll->ll_count);
}

/*
 * smb_slist_constructor
 *
 * Synchronized list constructor.
 */
void
smb_slist_constructor(
    smb_slist_t	*sl,
    size_t	size,
    size_t	offset)
{
	mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
	list_create(&sl->sl_list, size, offset);
	sl->sl_count = 0;
	sl->sl_waiting = B_FALSE;
}

/*
 * smb_slist_destructor
 *
 * Synchronized list destructor.
 */
void
smb_slist_destructor(
    smb_slist_t	*sl)
{
	VERIFY(sl->sl_count == 0);

	mutex_destroy(&sl->sl_mutex);
	cv_destroy(&sl->sl_cv);
	list_destroy(&sl->sl_list);
}

/*
 * smb_slist_insert_head
 *
 * This function inserts the object passed at the beginning of the list.
 */
void
smb_slist_insert_head(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_insert_head(&sl->sl_list, obj);
	++sl->sl_count;
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_insert_tail
 *
 * This function appends the object passed to the list.
 */
void
smb_slist_insert_tail(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_insert_tail(&sl->sl_list, obj);
	++sl->sl_count;
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_remove
 *
 * This function removes the object passed by the caller from the list.
 */
void
smb_slist_remove(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_remove(&sl->sl_list, obj);
	if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_move_tail
 *
 * This function transfers all the contents of the synchronized list to the
 * list_t provided. It returns the number of objects transferred.
 */
uint32_t
smb_slist_move_tail(
    list_t	*lst,
    smb_slist_t	*sl)
{
	uint32_t	rv;

	mutex_enter(&sl->sl_mutex);
	rv = sl->sl_count;
	if (sl->sl_count) {
		list_move_tail(lst, &sl->sl_list);
		sl->sl_count = 0;
		if (sl->sl_waiting) {
			sl->sl_waiting = B_FALSE;
			cv_broadcast(&sl->sl_cv);
		}
	}
	mutex_exit(&sl->sl_mutex);
	return (rv);
}

/*
 * smb_slist_obj_move
 *
 * This function moves an object from one list to the end of the other list. It
 * assumes the mutex of each list has been entered.
 */
void
smb_slist_obj_move(
    smb_slist_t	*dst,
    smb_slist_t	*src,
    void	*obj)
{
	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);

	list_remove(&src->sl_list, obj);
	list_insert_tail(&dst->sl_list, obj);
	dst->sl_count++;
	src->sl_count--;
	if ((src->sl_count == 0) && (src->sl_waiting)) {
		src->sl_waiting = B_FALSE;
		cv_broadcast(&src->sl_cv);
	}
}

/*
 * smb_slist_wait_for_empty
 *
 * This function waits for a list to be emptied.
 */
void
smb_slist_wait_for_empty(
    smb_slist_t	*sl)
{
	mutex_enter(&sl->sl_mutex);
	while (sl->sl_count) {
		sl->sl_waiting = B_TRUE;
		cv_wait(&sl->sl_cv, &sl->sl_mutex);
	}
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_exit
 *
 * This function exits the mutex of the list and signals the condition
 * variable if the list is empty.
 */
void
smb_slist_exit(smb_slist_t *sl)
{
	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}
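
/*
 * Illustrative consumer sketch (hypothetical caller; foo_t and its
 * f_lnd list node are invented for the example): a producer queues
 * work with smb_slist_insert_tail(), a consumer drains everything at
 * once with smb_slist_move_tail(), and teardown code can block in
 * smb_slist_wait_for_empty() until the list drains.
 *
 *	list_t	local;
 *
 *	list_create(&local, sizeof (foo_t), offsetof(foo_t, f_lnd));
 *	if (smb_slist_move_tail(&local, sl) != 0) {
 *		... process and free the entries now on "local" ...
 *	}
 *	list_destroy(&local);
 */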

/* smb_thread_... moved to smb_thread.c */

/*
 * smb_rwx_init
 */
void
smb_rwx_init(
    smb_rwx_t	*rwx)
{
	bzero(rwx, sizeof (smb_rwx_t));
	cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
}

/*
 * smb_rwx_destroy
 */
void
smb_rwx_destroy(
    smb_rwx_t	*rwx)
{
	mutex_destroy(&rwx->rwx_mutex);
	cv_destroy(&rwx->rwx_cv);
	rw_destroy(&rwx->rwx_lock);
}

/*
 * smb_rwx_rwexit
 */
void
smb_rwx_rwexit(
    smb_rwx_t	*rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mutex_enter(&rwx->rwx_mutex);
		if (rwx->rwx_waiting) {
			rwx->rwx_waiting = B_FALSE;
			cv_broadcast(&rwx->rwx_cv);
		}
		mutex_exit(&rwx->rwx_mutex);
	}
	rw_exit(&rwx->rwx_lock);
}

/*
 * smb_rwx_rwupgrade
 */
krw_t
smb_rwx_rwupgrade(
    smb_rwx_t	*rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		return (RW_WRITER);
	}
	if (!rw_tryupgrade(&rwx->rwx_lock)) {
		rw_exit(&rwx->rwx_lock);
		rw_enter(&rwx->rwx_lock, RW_WRITER);
	}
	return (RW_READER);
}

/*
 * smb_rwx_rwdowngrade
 */
void
smb_rwx_rwdowngrade(
    smb_rwx_t	*rwx,
    krw_t	mode)
{
	ASSERT(rw_write_held(&rwx->rwx_lock));
	ASSERT(rw_owner(&rwx->rwx_lock) == curthread);

	if (mode == RW_WRITER) {
		return;
	}
	ASSERT(mode == RW_READER);
	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		rwx->rwx_waiting = B_FALSE;
		cv_broadcast(&rwx->rwx_cv);
	}
	mutex_exit(&rwx->rwx_mutex);
	rw_downgrade(&rwx->rwx_lock);
}

/*
 * smb_rwx_rwwait
 *
 * This function assumes the smb_rwx lock was entered in RW_READER or
 * RW_WRITER mode. It will:
 *
 *	1) release the lock and save its current mode.
 *	2) wait until the condition variable is signaled. This can happen for
 *	   two reasons: when a writer releases the lock or when the timeout
 *	   (if provided) expires.
 *	3) re-acquire the lock in the mode saved in (1).
 */
int
smb_rwx_rwwait(
    smb_rwx_t	*rwx,
    clock_t	timeout)
{
	krw_t	mode;
	int	rc = 1;

	mutex_enter(&rwx->rwx_mutex);
	rwx->rwx_waiting = B_TRUE;
	mutex_exit(&rwx->rwx_mutex);

	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mode = RW_WRITER;
	} else {
		ASSERT(rw_read_held(&rwx->rwx_lock));
		mode = RW_READER;
	}
	rw_exit(&rwx->rwx_lock);

	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		if (timeout == -1) {
			cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
		} else {
			rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
			    timeout, TR_CLOCK_TICK);
		}
	}
	mutex_exit(&rwx->rwx_mutex);

	rw_enter(&rwx->rwx_lock, mode);
	return (rc);
}
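
/*
 * Illustrative wait loop (hypothetical caller; condition() is invented
 * for the example and smb_rwx_rwenter() is assumed to be the usual
 * wrapper from the smbsrv headers): the lock is held across the
 * predicate check, and smb_rwx_rwwait() drops and re-acquires it
 * around the sleep, so the condition must be re-tested on each pass.
 *
 *	smb_rwx_rwenter(&obj->rwx, RW_READER);
 *	while (!condition(obj)) {
 *		if (smb_rwx_rwwait(&obj->rwx, timeout) == -1)
 *			break;			(timed out)
 *	}
 *	smb_rwx_rwexit(&obj->rwx);
 */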

/* smb_idmap_... moved to smb_idmap.c */

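/*
 * smb_time_unix_to_nt, smb_time_nt_to_unix
 *
 * NT time counts 100-nanosecond intervals since January 1, 1601 UTC,
 * while Unix time counts seconds since January 1, 1970 UTC, so the
 * conversion scales by 10^7 and applies NT_TIME_BIAS (the number of
 * 100ns intervals between the two epochs).  Zero is passed through
 * unchanged in both directions as a "no time" marker.
 */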
uint64_t
smb_time_unix_to_nt(timestruc_t *unix_time)
{
	uint64_t nt_time;

	if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
		return (0);

	nt_time = unix_time->tv_sec;
	nt_time *= 10000000;  /* seconds to 100ns */
	nt_time += unix_time->tv_nsec / 100;
	return (nt_time + NT_TIME_BIAS);
}

void
smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
{
	uint32_t seconds;

	ASSERT(unix_time);

	if ((nt_time == 0) || (nt_time == -1)) {
		unix_time->tv_sec = 0;
		unix_time->tv_nsec = 0;
		return;
	}

	/*
	 * Can't represent times less than or equal NT_TIME_BIAS,
	 * so convert them to the oldest date we can store.
	 * Note that time zero is "special" being converted
	 * both directions as 0:0 (unix-to-nt, nt-to-unix).
	 */
	if (nt_time <= NT_TIME_BIAS) {
		unix_time->tv_sec = 0;
		unix_time->tv_nsec = 100;
		return;
	}

	nt_time -= NT_TIME_BIAS;
	seconds = nt_time / 10000000;
	unix_time->tv_sec = seconds;
	unix_time->tv_nsec = (nt_time % 10000000) * 100;
}

/*
 * smb_time_gmt_to_local, smb_time_local_to_gmt
 *
 * Apply the gmt offset to convert between local time and gmt
 */
int32_t
smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt)
{
	if ((gmt == 0) || (gmt == -1))
		return (0);

	return (gmt - sr->sr_gmtoff);
}

int32_t
smb_time_local_to_gmt(smb_request_t *sr, int32_t local)
{
	if ((local == 0) || (local == -1))
		return (0);

	return (local + sr->sr_gmtoff);
}

/*
 * smb_time_dos_to_unix
 *
 * Convert SMB_DATE & SMB_TIME values to a unix timestamp.
 *
 * A date/time field of 0 means that the server file system
 * assigned value need not be changed. The behaviour when the
 * date/time field is set to -1 is not documented but it is
 * generally treated like 0.
 * If date or time is 0 or -1 the unix time is returned as 0
 * so that the caller can identify and handle this special case.
 */
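/*
 * For reference, the DOS date/time encoding unpacked below (derived
 * from the shifts in this file):
 *
 *	date:	bits 15-9	year, relative to 1980
 *		bits  8-5	month of year (1 - 12)
 *		bits  4-0	day of month (1 - 31)
 *
 *	time:	bits 15-11	hours (0 - 23)
 *		bits 10-5	minutes (0 - 59)
 *		bits  4-0	seconds / 2 (0 - 29)
 */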
int32_t
smb_time_dos_to_unix(int16_t date, int16_t time)
{
	struct tm	atm;

	if (((date == 0) || (time == 0)) ||
	    ((date == -1) || (time == -1))) {
		return (0);
	}

	atm.tm_year = ((date >>  9) & 0x3F) + 80;
	atm.tm_mon  = ((date >>  5) & 0x0F) - 1;
	atm.tm_mday = ((date >>  0) & 0x1F);
	atm.tm_hour = ((time >> 11) & 0x1F);
	atm.tm_min  = ((time >>  5) & 0x3F);
	atm.tm_sec  = ((time >>  0) & 0x1F) << 1;

	return (smb_timegm(&atm));
}

void
smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p)
{
	struct tm	atm;
	int		i;
	time_t		tmp_time;

	if (ux_time == 0) {
		*date_p = 0;
		*time_p = 0;
		return;
	}

	tmp_time = (time_t)ux_time;
	(void) smb_gmtime_r(&tmp_time, &atm);

	if (date_p) {
		i = 0;
		i += atm.tm_year - 80;
		i <<= 4;
		i += atm.tm_mon + 1;
		i <<= 5;
		i += atm.tm_mday;

		*date_p = (short)i;
	}
	if (time_p) {
		i = 0;
		i += atm.tm_hour;
		i <<= 6;
		i += atm.tm_min;
		i <<= 5;
		i += atm.tm_sec >> 1;

		*time_p = (short)i;
	}
}

/*
 * smb_gmtime_r
 *
 * Thread-safe version of smb_gmtime. Returns a null pointer if either
 * input parameter is a null pointer. Otherwise returns a pointer
 * to result.
 *
 * Day of the week calculation: the Epoch was a Thursday.
 *
 * There are no timezone corrections so tm_isdst and tm_gmtoff are
 * always zero, and the zone is always WET.
 */
struct tm *
smb_gmtime_r(time_t *clock, struct tm *result)
{
	time_t tsec;
	int year;
	int month;
	int sec_per_month;

	if (clock == 0 || result == 0)
		return (0);

	bzero(result, sizeof (struct tm));
	tsec = *clock;
	tsec -= tzh_leapcnt;

	result->tm_wday = tsec / SECSPERDAY;
	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;

	year = EPOCH_YEAR;
	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
	    (SECSPERDAY * DAYSPERNYEAR))) {
		if (isleap(year))
			tsec -= SECSPERDAY * DAYSPERLYEAR;
		else
			tsec -= SECSPERDAY * DAYSPERNYEAR;

		++year;
	}

	result->tm_year = year - TM_YEAR_BASE;
	result->tm_yday = tsec / SECSPERDAY;

	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
		sec_per_month = days_in_month[month] * SECSPERDAY;

		if (month == TM_FEBRUARY && isleap(year))
			sec_per_month += SECSPERDAY;

		if (tsec < sec_per_month)
			break;

		tsec -= sec_per_month;
	}

	result->tm_mon = month;
	result->tm_mday = (tsec / SECSPERDAY) + 1;
	tsec %= SECSPERDAY;
	result->tm_sec = tsec % 60;
	tsec /= 60;
	result->tm_min = tsec % 60;
	tsec /= 60;
	result->tm_hour = (int)tsec;

	return (result);
}

/*
 * smb_timegm
 *
 * Converts the broken-down time in tm to a time value, i.e. the number
 * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
 * not a POSIX or ANSI function. Per the man page, the input values of
 * tm_wday and tm_yday are ignored and, as the input data is assumed to
 * represent GMT, we force tm_isdst and tm_gmtoff to 0.
 *
 * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
 * and tm_yday, and bring the other fields within normal range. I don't
 * think this is really how it should be done but it's convenient for
 * now.
 */
time_t
smb_timegm(struct tm *tm)
{
	time_t tsec;
	int dd;
	int mm;
	int yy;
	int year;

	if (tm == 0)
		return (-1);

	year = tm->tm_year + TM_YEAR_BASE;
	tsec = tzh_leapcnt;

	for (yy = EPOCH_YEAR; yy < year; ++yy) {
		if (isleap(yy))
			tsec += SECSPERDAY * DAYSPERLYEAR;
		else
			tsec += SECSPERDAY * DAYSPERNYEAR;
	}

	for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
		dd = days_in_month[mm] * SECSPERDAY;

		if (mm == TM_FEBRUARY && isleap(year))
			dd += SECSPERDAY;

		tsec += dd;
	}

	tsec += (tm->tm_mday - 1) * SECSPERDAY;
	tsec += tm->tm_sec;
	tsec += tm->tm_min * SECSPERMIN;
	tsec += tm->tm_hour * SECSPERHOUR;

	tm->tm_isdst = 0;
	(void) smb_gmtime_r(&tsec, tm);
	return (tsec);
}
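
/*
 * Illustrative check (not from the original source): a struct tm of
 * 1970-01-02 00:00:00 (tm_year = 70, tm_mon = 0, tm_mday = 2) yields
 * 86400, i.e. one day past the Epoch, ignoring tzh_leapcnt.
 */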

/*
 * smb_pad_align
 *
 * Returns the number of bytes required to pad an offset to the
 * specified alignment.
 */
uint32_t
smb_pad_align(uint32_t offset, uint32_t align)
{
	uint32_t pad = offset % align;

	if (pad != 0)
		pad = align - pad;

	return (pad);
}
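
/*
 * For example (illustrative): smb_pad_align(13, 4) returns 3 and
 * smb_pad_align(8, 8) returns 0.
 */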

/*
 * smb_panic
 *
 * Logs the file name, function name and line number passed in and panics the
 * system.
 */
void
smb_panic(char *file, const char *func, int line)
{
	cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
}

/*
 * Creates an AVL tree and initializes the given smb_avl_t
 * structure using the passed args
 */
void
smb_avl_create(smb_avl_t *avl, size_t size, size_t offset,
	const smb_avl_nops_t *ops)
{
	ASSERT(avl);
	ASSERT(ops);

	rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL);

	avl->avl_nops = ops;
	avl->avl_state = SMB_AVL_STATE_READY;
	avl->avl_refcnt = 0;
	(void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence,
	    sizeof (uint32_t));

	avl_create(&avl->avl_tree, ops->avln_cmp, size, offset);
}

/*
 * Destroys the specified AVL tree.
 * It waits for all the in-flight operations to finish
 * before destroying the AVL.
 */
void
smb_avl_destroy(smb_avl_t *avl)
{
	void *cookie = NULL;
	void *node;

	ASSERT(avl);

	mutex_enter(&avl->avl_mutex);
	if (avl->avl_state != SMB_AVL_STATE_READY) {
		mutex_exit(&avl->avl_mutex);
		return;
	}

	avl->avl_state = SMB_AVL_STATE_DESTROYING;

	while (avl->avl_refcnt > 0)
		(void) cv_wait(&avl->avl_cv, &avl->avl_mutex);
	mutex_exit(&avl->avl_mutex);

	rw_enter(&avl->avl_lock, RW_WRITER);
	while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL)
		avl->avl_nops->avln_destroy(node);

	avl_destroy(&avl->avl_tree);
	rw_exit(&avl->avl_lock);

	rw_destroy(&avl->avl_lock);

	mutex_destroy(&avl->avl_mutex);
	bzero(avl, sizeof (smb_avl_t));
}

/*
 * Adds the given item to the AVL if it's
 * not already there.
 *
 * Returns:
 *
 * 	0		Success
 * 	ENOTACTIVE	AVL is not in READY state
 * 	EEXIST		The item is already in AVL
 */
int
smb_avl_add(smb_avl_t *avl, void *item)
{
	avl_index_t where;

	ASSERT(avl);
	ASSERT(item);

	if (!smb_avl_hold(avl))
		return (ENOTACTIVE);

	rw_enter(&avl->avl_lock, RW_WRITER);
	if (avl_find(&avl->avl_tree, item, &where) != NULL) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return (EEXIST);
	}

	avl_insert(&avl->avl_tree, item, where);
	avl->avl_sequence++;
	rw_exit(&avl->avl_lock);

	smb_avl_rele(avl);
	return (0);
}

/*
 * Removes the given item from the AVL.
 * If no reference is left on the item
 * it will also be destroyed by calling the
 * registered destroy operation.
 */
void
smb_avl_remove(smb_avl_t *avl, void *item)
{
	avl_index_t where;
	void *rm_item;

	ASSERT(avl);
	ASSERT(item);

	if (!smb_avl_hold(avl))
		return;

	rw_enter(&avl->avl_lock, RW_WRITER);
	if ((rm_item = avl_find(&avl->avl_tree, item, &where)) == NULL) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return;
	}

	avl_remove(&avl->avl_tree, rm_item);
	if (avl->avl_nops->avln_rele(rm_item))
		avl->avl_nops->avln_destroy(rm_item);
	avl->avl_sequence++;
	rw_exit(&avl->avl_lock);

	smb_avl_rele(avl);
}

/*
 * Looks up the AVL for the given item.
 * If the item is found a hold on the object
 * is taken before the pointer to it is
 * returned to the caller. The caller MUST
 * always call smb_avl_release() after it's done
 * using the returned object to release the hold
 * taken on the object.
 */
void *
smb_avl_lookup(smb_avl_t *avl, void *item)
{
	void *node = NULL;

	ASSERT(avl);
	ASSERT(item);

	if (!smb_avl_hold(avl))
		return (NULL);

	rw_enter(&avl->avl_lock, RW_READER);
	node = avl_find(&avl->avl_tree, item, NULL);
	if (node != NULL)
		avl->avl_nops->avln_hold(node);
	rw_exit(&avl->avl_lock);

	if (node == NULL)
		smb_avl_rele(avl);

	return (node);
}

/*
 * The hold on the given object is released.
 * This function MUST always be called after
 * smb_avl_lookup() and smb_avl_iterate() for
 * the returned object.
 *
 * If AVL is in DESTROYING state, the destroying
 * thread will be notified.
 */
void
smb_avl_release(smb_avl_t *avl, void *item)
{
	ASSERT(avl);
	ASSERT(item);

	if (avl->avl_nops->avln_rele(item))
		avl->avl_nops->avln_destroy(item);

	smb_avl_rele(avl);
}

/*
 * Initializes the given cursor for the AVL.
 * The cursor will be used to iterate through the AVL
 */
void
smb_avl_iterinit(smb_avl_t *avl, smb_avl_cursor_t *cursor)
{
	ASSERT(avl);
	ASSERT(cursor);

	cursor->avlc_next = NULL;
	cursor->avlc_sequence = avl->avl_sequence;
}

/*
 * Iterates through the AVL using the given cursor.
 * It always starts at the beginning and then returns
 * a pointer to the next object on each subsequent call.
 *
 * If a new object is added to or removed from the AVL
 * between two calls to this function, the iteration
 * will terminate prematurely.
 *
 * The caller MUST always call smb_avl_release() after it's
 * done using the returned object to release the hold taken
 * on the object.
 */
void *
smb_avl_iterate(smb_avl_t *avl, smb_avl_cursor_t *cursor)
{
	void *node;

	ASSERT(avl);
	ASSERT(cursor);

	if (!smb_avl_hold(avl))
		return (NULL);

	rw_enter(&avl->avl_lock, RW_READER);
	if (cursor->avlc_sequence != avl->avl_sequence) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return (NULL);
	}

	if (cursor->avlc_next == NULL)
		node = avl_first(&avl->avl_tree);
	else
		node = AVL_NEXT(&avl->avl_tree, cursor->avlc_next);

	if (node != NULL)
		avl->avl_nops->avln_hold(node);

	cursor->avlc_next = node;
	rw_exit(&avl->avl_lock);

	if (node == NULL)
		smb_avl_rele(avl);

	return (node);
}
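
/*
 * Illustrative iteration sketch (hypothetical caller): each returned
 * node carries a hold that must be released, and the loop ends early
 * if the AVL was modified since smb_avl_iterinit().
 *
 *	smb_avl_cursor_t	cursor;
 *	void			*node;
 *
 *	smb_avl_iterinit(avl, &cursor);
 *	while ((node = smb_avl_iterate(avl, &cursor)) != NULL) {
 *		... use node ...
 *		smb_avl_release(avl, node);
 *	}
 */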

/*
 * Increments the AVL reference count in order to
 * prevent the avl from being destroyed while it's
 * being accessed.
 */
static boolean_t
smb_avl_hold(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	if (avl->avl_state != SMB_AVL_STATE_READY) {
		mutex_exit(&avl->avl_mutex);
		return (B_FALSE);
	}
	avl->avl_refcnt++;
	mutex_exit(&avl->avl_mutex);

	return (B_TRUE);
}

/*
 * Decrements the AVL reference count to release the
 * hold. If another thread is trying to destroy the
 * AVL and is waiting for the reference count to become
 * 0, it is signaled to wake up.
 */
static void
smb_avl_rele(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	ASSERT(avl->avl_refcnt > 0);
	avl->avl_refcnt--;
	if (avl->avl_state == SMB_AVL_STATE_DESTROYING)
		cv_broadcast(&avl->avl_cv);
	mutex_exit(&avl->avl_mutex);
}

/*
 * smb_latency_init
 */
void
smb_latency_init(smb_latency_t *lat)
{
	bzero(lat, sizeof (*lat));
	mutex_init(&lat->ly_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
}

/*
 * smb_latency_destroy
 */
void
smb_latency_destroy(smb_latency_t *lat)
{
	mutex_destroy(&lat->ly_mutex);
}

/*
 * smb_latency_add_sample
 *
 * Uses the new sample to update the running mean and the deviation
 * accumulator, for both the aggregate and the delta counters. The
 * sample must be a scaled value.
 */
void
smb_latency_add_sample(smb_latency_t *lat, hrtime_t sample)
{
	hrtime_t	a_mean;
	hrtime_t	d_mean;

	mutex_enter(&lat->ly_mutex);
	lat->ly_a_nreq++;
	lat->ly_a_sum += sample;
	if (lat->ly_a_nreq != 0) {
		a_mean = lat->ly_a_sum / lat->ly_a_nreq;
		lat->ly_a_stddev =
		    (sample - a_mean) * (sample - lat->ly_a_mean);
		lat->ly_a_mean = a_mean;
	}
	lat->ly_d_nreq++;
	lat->ly_d_sum += sample;
	if (lat->ly_d_nreq != 0) {
		d_mean = lat->ly_d_sum / lat->ly_d_nreq;
		lat->ly_d_stddev =
		    (sample - d_mean) * (sample - lat->ly_d_mean);
		lat->ly_d_mean = d_mean;
	}
	mutex_exit(&lat->ly_mutex);
}

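/*
 * The smb_srqueue_t counters below follow the same model as the
 * kernel's kstat I/O wait/run queue accounting: on every transition
 * the code accumulates, per queue, the elapsed time during which the
 * queue was non-empty (srq_wtime/srq_rtime) and the time-weighted
 * queue length (srq_wlentime/srq_rlentime), from which a consumer can
 * derive average queue length and utilization over an interval.
 */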
/*
 * smb_srqueue_init
 */
void
smb_srqueue_init(smb_srqueue_t *srq)
{
	bzero(srq, sizeof (*srq));
	mutex_init(&srq->srq_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
	srq->srq_wlastupdate = srq->srq_rlastupdate = gethrtime_unscaled();
}

/*
 * smb_srqueue_destroy
 */
void
smb_srqueue_destroy(smb_srqueue_t *srq)
{
	mutex_destroy(&srq->srq_mutex);
}

/*
 * smb_srqueue_waitq_enter
 */
void
smb_srqueue_waitq_enter(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	wcnt = srq->srq_wcnt++;
	if (wcnt != 0) {
		srq->srq_wlentime += delta * wcnt;
		srq->srq_wtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}

/*
 * smb_srqueue_runq_exit
 */
void
smb_srqueue_runq_exit(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	rcnt = srq->srq_rcnt--;
	ASSERT(rcnt > 0);
	srq->srq_rlentime += delta * rcnt;
	srq->srq_rtime += delta;
	mutex_exit(&srq->srq_mutex);
}

/*
 * smb_srqueue_waitq_to_runq
 */
void
smb_srqueue_waitq_to_runq(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	wcnt = srq->srq_wcnt--;
	ASSERT(wcnt > 0);
	srq->srq_wlentime += delta * wcnt;
	srq->srq_wtime += delta;
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	rcnt = srq->srq_rcnt++;
	if (rcnt != 0) {
		srq->srq_rlentime += delta * rcnt;
		srq->srq_rtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}

/*
 * smb_srqueue_update
 *
 * Takes a snapshot of the smb_srqueue_t counters passed in and stores
 * the scaled results in the smb_kstat_utilization_t provided.
 */
void
smb_srqueue_update(smb_srqueue_t *srq, smb_kstat_utilization_t *kd)
{
	hrtime_t	delta;
	hrtime_t	snaptime;

	mutex_enter(&srq->srq_mutex);
	snaptime = gethrtime_unscaled();
	delta = snaptime - srq->srq_wlastupdate;
	srq->srq_wlastupdate = snaptime;
	if (srq->srq_wcnt != 0) {
		srq->srq_wlentime += delta * srq->srq_wcnt;
		srq->srq_wtime += delta;
	}
	delta = snaptime - srq->srq_rlastupdate;
	srq->srq_rlastupdate = snaptime;
	if (srq->srq_rcnt != 0) {
		srq->srq_rlentime += delta * srq->srq_rcnt;
		srq->srq_rtime += delta;
	}
	kd->ku_rlentime = srq->srq_rlentime;
	kd->ku_rtime = srq->srq_rtime;
	kd->ku_wlentime = srq->srq_wlentime;
	kd->ku_wtime = srq->srq_wtime;
	mutex_exit(&srq->srq_mutex);
	scalehrtime(&kd->ku_rlentime);
	scalehrtime(&kd->ku_rtime);
	scalehrtime(&kd->ku_wlentime);
	scalehrtime(&kd->ku_wtime);
}

void
smb_threshold_init(smb_cmd_threshold_t *ct, char *cmd,
    uint_t threshold, uint_t timeout)
{
	bzero(ct, sizeof (smb_cmd_threshold_t));
	mutex_init(&ct->ct_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ct->ct_cond, NULL, CV_DEFAULT, NULL);

	ct->ct_cmd = cmd;
	ct->ct_threshold = threshold;
	ct->ct_timeout = timeout;
}

void
smb_threshold_fini(smb_cmd_threshold_t *ct)
{
	cv_destroy(&ct->ct_cond);
	mutex_destroy(&ct->ct_mutex);
}

/*
 * This threshold mechanism is used to limit the number of simultaneous
 * named pipe connections, concurrent authentication conversations, etc.
 * Requests that would take us over the threshold wait until either the
 * resources are available (return zero) or timeout (return error).
 */
int
smb_threshold_enter(smb_cmd_threshold_t *ct)
{
	clock_t	time, rem;

	time = MSEC_TO_TICK(ct->ct_timeout) + ddi_get_lbolt();
	mutex_enter(&ct->ct_mutex);

	while (ct->ct_threshold != 0 &&
	    ct->ct_threshold <= ct->ct_active_cnt) {
		ct->ct_blocked_cnt++;
		rem = cv_timedwait(&ct->ct_cond, &ct->ct_mutex, time);
		ct->ct_blocked_cnt--;
		if (rem < 0) {
			mutex_exit(&ct->ct_mutex);
			return (ETIME);
		}
	}
	if (ct->ct_threshold == 0) {
		mutex_exit(&ct->ct_mutex);
		return (ECANCELED);
	}

	ASSERT3U(ct->ct_active_cnt, <, ct->ct_threshold);
	ct->ct_active_cnt++;

	mutex_exit(&ct->ct_mutex);
	return (0);
}

void
smb_threshold_exit(smb_cmd_threshold_t *ct)
{
	mutex_enter(&ct->ct_mutex);
	ASSERT3U(ct->ct_active_cnt, >, 0);
	ct->ct_active_cnt--;
	if (ct->ct_blocked_cnt)
		cv_signal(&ct->ct_cond);
	mutex_exit(&ct->ct_mutex);
}
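
/*
 * Illustrative caller pattern (hypothetical; do_rate_limited_work() is
 * invented for the example): every successful smb_threshold_enter()
 * must be balanced with smb_threshold_exit().
 *
 *	if (smb_threshold_enter(ct) != 0)
 *		return;				(ETIME or ECANCELED)
 *	do_rate_limited_work();
 *	smb_threshold_exit(ct);
 */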

void
smb_threshold_wake_all(smb_cmd_threshold_t *ct)
{
	mutex_enter(&ct->ct_mutex);
	ct->ct_threshold = 0;
	cv_broadcast(&ct->ct_cond);
	mutex_exit(&ct->ct_mutex);
}