xref: /titanic_44/usr/src/uts/common/fs/smbsrv/smb_kutil.c (revision c3e9074d863038c38dc15c3af85b017f42133816)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 #include <sys/param.h>
26 #include <sys/types.h>
27 #include <sys/tzfile.h>
28 #include <sys/atomic.h>
29 #include <sys/kidmap.h>
30 #include <sys/time.h>
31 #include <sys/spl.h>
32 #include <sys/cpuvar.h>
33 #include <sys/random.h>
34 #include <smbsrv/smb_kproto.h>
35 #include <smbsrv/smb_fsops.h>
36 #include <smbsrv/smbinfo.h>
37 #include <smbsrv/smb_xdr.h>
38 #include <smbsrv/smb_vops.h>
39 #include <smbsrv/smb_idmap.h>
40 
41 #include <sys/sid.h>
42 #include <sys/priv_names.h>
43 
44 static kmem_cache_t	*smb_dtor_cache;
45 static boolean_t	smb_llist_initialized = B_FALSE;
46 
47 static boolean_t smb_thread_continue_timedwait_locked(smb_thread_t *, int);
48 
49 static boolean_t smb_avl_hold(smb_avl_t *);
50 static void smb_avl_rele(smb_avl_t *);
51 
52 time_t tzh_leapcnt = 0;
53 
54 struct tm
55 *smb_gmtime_r(time_t *clock, struct tm *result);
56 
57 time_t
58 smb_timegm(struct tm *tm);
59 
60 struct	tm {
61 	int	tm_sec;
62 	int	tm_min;
63 	int	tm_hour;
64 	int	tm_mday;
65 	int	tm_mon;
66 	int	tm_year;
67 	int	tm_wday;
68 	int	tm_yday;
69 	int	tm_isdst;
70 };
71 
72 static int days_in_month[] = {
73 	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
74 };
75 
76 int
77 smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
78 {
79 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
80 		return (smb_wcequiv_strlen(str));
81 	return (strlen(str));
82 }
83 
84 int
85 smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
86 {
87 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
88 		return (smb_wcequiv_strlen(str) + 2);
89 	return (strlen(str) + 1);
90 }
91 
92 int
93 smb_ascii_or_unicode_null_len(struct smb_request *sr)
94 {
95 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
96 		return (2);
97 	return (1);
98 }
99 
100 /*
101  * Return B_TRUE if pattern contains wildcards
102  */
103 boolean_t
104 smb_contains_wildcards(const char *pattern)
105 {
106 	static const char *wildcards = "*?";
107 
108 	return (strpbrk(pattern, wildcards) != NULL);
109 }
110 
111 /*
112  * When converting wildcards a '.' in a name is treated as a base and
113  * extension separator even if the name is longer than 8.3.
114  *
115  * The '*' character matches an entire part of the name.  For example,
116  * "*.abc" matches any name with an extension of "abc".
117  *
118  * The '?' character matches a single character.
119  * If the base contains all ? (8 or more) then it is treated as *.
120  * If the extension contains all ? (3 or more) then it is treated as *.
121  *
122  * Clients convert ASCII wildcards to Unicode wildcards as follows:
123  *
124  *	? is converted to >
125  *	. is converted to " if it is followed by ? or *
126  *	* is converted to < if it is followed by .
127  *
128  * Note that clients convert "*." to '<' and drop the '.' but "*.txt"
129  * is sent as "<.TXT", i.e.
130  *
131  * 	dir *.		->	dir <
132  * 	dir *.txt	->	dir <.TXT
133  *
134  * Since " and < are illegal in Windows file names, we always convert
135  * these Unicode wildcards without checking the following character.
136  */
137 void
138 smb_convert_wildcards(char *pattern)
139 {
140 	static char *match_all[] = {
141 		"*.",
142 		"*.*"
143 	};
144 	char	*extension;
145 	char	*p;
146 	int	len;
147 	int	i;
148 
149 	/*
150 	 * Special case "<" for "dir *.", and fast-track for "*".
151 	 */
152 	if ((*pattern == '<') || (*pattern == '*')) {
153 		if (*(pattern + 1) == '\0') {
154 			*pattern = '*';
155 			return;
156 		}
157 	}
158 
159 	for (p = pattern; *p != '\0'; ++p) {
160 		switch (*p) {
161 		case '<':
162 			*p = '*';
163 			break;
164 		case '>':
165 			*p = '?';
166 			break;
167 		case '\"':
168 			*p = '.';
169 			break;
170 		default:
171 			break;
172 		}
173 	}
174 
175 	/*
176 	 * Replace "????????.ext" with "*.ext".
177 	 */
178 	p = pattern;
179 	p += strspn(p, "?");
180 	if (*p == '.') {
181 		*p = '\0';
182 		len = strlen(pattern);
183 		*p = '.';
184 		if (len >= SMB_NAME83_BASELEN) {
185 			*pattern = '*';
186 			(void) strlcpy(pattern + 1, p, MAXPATHLEN - 1);
187 		}
188 	}
189 
190 	/*
191  * Replace "base.???" with "base.*".
192 	 */
193 	if ((extension = strrchr(pattern, '.')) != NULL) {
194 		p = ++extension;
195 		p += strspn(p, "?");
196 		if (*p == '\0') {
197 			len = strlen(extension);
198 			if (len >= SMB_NAME83_EXTLEN) {
199 				*extension = '\0';
200 				(void) strlcat(pattern, "*", MAXPATHLEN);
201 			}
202 		}
203 	}
204 
205 	/*
206 	 * Replace anything that matches an entry in match_all with "*".
207 	 */
208 	for (i = 0; i < sizeof (match_all) / sizeof (match_all[0]); ++i) {
209 		if (strcmp(pattern, match_all[i]) == 0) {
210 			(void) strlcpy(pattern, "*", MAXPATHLEN);
211 			break;
212 		}
213 	}
214 }
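
/*
 * Usage sketch (illustrative only; not how this file itself calls it).
 * Patterns arrive in a MAXPATHLEN-sized buffer and are rewritten in
 * place by the rules above:
 *
 *	char pattern[MAXPATHLEN];
 *
 *	(void) strlcpy(pattern, "<.TXT", MAXPATHLEN);
 *	smb_convert_wildcards(pattern);		pattern is now "*.TXT"
 *
 *	(void) strlcpy(pattern, "????????.abc", MAXPATHLEN);
 *	smb_convert_wildcards(pattern);		pattern is now "*.abc"
 */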
215 
216 /*
217  * smb_sattr_check
218  *
219  * Check file attributes against a search attribute (sattr) mask.
220  *
221  * Normal files, which includes READONLY and ARCHIVE, always pass
222  * this check.  If the DIRECTORY, HIDDEN or SYSTEM special attributes
223  * are set then they must appear in the search mask.  The special
224  * attributes are inclusive, i.e. all special attributes that appear
225  * in sattr must also appear in the file attributes for the check to
226  * pass.
227  *
228  * The following examples show how this works:
229  *
230  *		fileA:	READONLY
231  *		fileB:	0 (no attributes = normal file)
232  *		fileC:	READONLY, ARCHIVE
233  *		fileD:	HIDDEN
234  *		fileE:	READONLY, HIDDEN, SYSTEM
235  *		dirA:	DIRECTORY
236  *
237  * search attribute: 0
238  *		Returns: fileA, fileB and fileC.
239  * search attribute: HIDDEN
240  *		Returns: fileA, fileB, fileC and fileD.
241  * search attribute: SYSTEM
242  *		Returns: fileA, fileB and fileC.
243  * search attribute: DIRECTORY
244  *		Returns: fileA, fileB, fileC and dirA.
245  * search attribute: HIDDEN and SYSTEM
246  *		Returns: fileA, fileB, fileC, fileD and fileE.
247  *
248  * Returns true if the file and sattr match; otherwise, returns false.
249  */
250 boolean_t
251 smb_sattr_check(uint16_t dosattr, uint16_t sattr)
252 {
253 	if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
254 	    !(sattr & FILE_ATTRIBUTE_DIRECTORY))
255 		return (B_FALSE);
256 
257 	if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
258 	    !(sattr & FILE_ATTRIBUTE_HIDDEN))
259 		return (B_FALSE);
260 
261 	if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
262 	    !(sattr & FILE_ATTRIBUTE_SYSTEM))
263 		return (B_FALSE);
264 
265 	return (B_TRUE);
266 }
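
/*
 * Typical call pattern (illustrative; the variable names are
 * hypothetical): a directory search skips entries whose DOS attributes
 * fail the client-supplied search attribute mask.
 *
 *	if (!smb_sattr_check(dosattr, sattr))
 *		continue;	hide this entry from the search reply
 */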
267 
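/*
 * Simple time and stub helpers: microtime() fills in wall-clock time at
 * whole-second resolution, clock_get_milli_uptime() reports system
 * uptime in milliseconds, and smb_noop() is a do-nothing callback stub.
 */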
268 int
269 microtime(timestruc_t *tvp)
270 {
271 	tvp->tv_sec = gethrestime_sec();
272 	tvp->tv_nsec = 0;
273 	return (0);
274 }
275 
276 int32_t
277 clock_get_milli_uptime()
278 {
279 	return (TICK_TO_MSEC(ddi_get_lbolt()));
280 }
281 
282 int /*ARGSUSED*/
283 smb_noop(void *p, size_t size, int foo)
284 {
285 	return (0);
286 }
287 
288 /*
289  * smb_idpool_increment
290  *
291  * This function grows the ID pool by doubling its current size. This
292  * function assumes the caller entered the mutex of the pool.
293  */
294 static int
295 smb_idpool_increment(
296     smb_idpool_t	*pool)
297 {
298 	uint8_t		*new_pool;
299 	uint32_t	new_size;
300 
301 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
302 
303 	new_size = pool->id_size * 2;
304 	if (new_size <= SMB_IDPOOL_MAX_SIZE) {
305 		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
306 		if (new_pool) {
307 			bzero(new_pool, new_size / 8);
308 			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
309 			kmem_free(pool->id_pool, pool->id_size / 8);
310 			pool->id_pool = new_pool;
311 			pool->id_free_counter += new_size - pool->id_size;
312 			pool->id_max_free_counter += new_size - pool->id_size;
313 			pool->id_size = new_size;
314 			pool->id_idx_msk = (new_size / 8) - 1;
315 			if (new_size >= SMB_IDPOOL_MAX_SIZE) {
316 				/* id -1 made unavailable */
317 				pool->id_pool[pool->id_idx_msk] = 0x80;
318 				pool->id_free_counter--;
319 				pool->id_max_free_counter--;
320 			}
321 			return (0);
322 		}
323 	}
324 	return (-1);
325 }
326 
327 /*
328  * smb_idpool_constructor
329  *
330  * This function initializes the pool structure provided.
331  */
332 int
333 smb_idpool_constructor(
334     smb_idpool_t	*pool)
335 {
336 
337 	ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);
338 
339 	pool->id_size = SMB_IDPOOL_MIN_SIZE;
340 	pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
341 	pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
342 	pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
343 	pool->id_bit = 0x02;
344 	pool->id_bit_idx = 1;
345 	pool->id_idx = 0;
346 	pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
347 	    KM_SLEEP);
348 	bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
349 	/* -1 id made unavailable */
350 	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
351 	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
352 	pool->id_magic = SMB_IDPOOL_MAGIC;
353 	return (0);
354 }
355 
356 /*
357  * smb_idpool_destructor
358  *
359  * This function tears down and frees the resources associated with the
360  * pool provided.
361  */
362 void
363 smb_idpool_destructor(
364     smb_idpool_t	*pool)
365 {
366 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
367 	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
368 	pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
369 	mutex_destroy(&pool->id_mutex);
370 	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
371 }
372 
373 /*
374  * smb_idpool_alloc
375  *
376  * This function allocates an ID from the pool provided.
377  */
378 int
379 smb_idpool_alloc(
380     smb_idpool_t	*pool,
381     uint16_t		*id)
382 {
383 	uint32_t	i;
384 	uint8_t		bit;
385 	uint8_t		bit_idx;
386 	uint8_t		byte;
387 
388 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
389 
390 	mutex_enter(&pool->id_mutex);
391 	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
392 		mutex_exit(&pool->id_mutex);
393 		return (-1);
394 	}
395 
396 	i = pool->id_size;
397 	while (i) {
398 		bit = pool->id_bit;
399 		bit_idx = pool->id_bit_idx;
400 		byte = pool->id_pool[pool->id_idx];
401 		while (bit) {
402 			if (byte & bit) {
403 				bit = bit << 1;
404 				bit_idx++;
405 				continue;
406 			}
407 			pool->id_pool[pool->id_idx] |= bit;
408 			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
409 			pool->id_free_counter--;
410 			pool->id_bit = bit;
411 			pool->id_bit_idx = bit_idx;
412 			mutex_exit(&pool->id_mutex);
413 			return (0);
414 		}
415 		pool->id_bit = 1;
416 		pool->id_bit_idx = 0;
417 		pool->id_idx++;
418 		pool->id_idx &= pool->id_idx_msk;
419 		--i;
420 	}
421 	/*
422 	 * This section of code shouldn't be reached. If there are IDs
423 	 * available and none could be found there's a problem.
424 	 */
425 	ASSERT(0);
426 	mutex_exit(&pool->id_mutex);
427 	return (-1);
428 }
429 
430 /*
431  * smb_idpool_free
432  *
433  * This function frees the ID provided.
434  */
435 void
436 smb_idpool_free(
437     smb_idpool_t	*pool,
438     uint16_t		id)
439 {
440 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
441 	ASSERT(id != 0);
442 	ASSERT(id != 0xFFFF);
443 
444 	mutex_enter(&pool->id_mutex);
445 	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
446 		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
447 		pool->id_free_counter++;
448 		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
449 		mutex_exit(&pool->id_mutex);
450 		return;
451 	}
452 	/* Freeing a free ID. */
453 	ASSERT(0);
454 	mutex_exit(&pool->id_mutex);
455 }
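
/*
 * Minimal ID pool lifecycle sketch (illustrative; error handling
 * trimmed).  IDs 0 and 0xFFFF are reserved by the pool itself, as the
 * constructor and increment logic above arrange.
 *
 *	smb_idpool_t	pool;
 *	uint16_t	id;
 *
 *	(void) smb_idpool_constructor(&pool);
 *	if (smb_idpool_alloc(&pool, &id) == 0) {
 *		... use id (never 0, never 0xFFFF) ...
 *		smb_idpool_free(&pool, id);
 *	}
 *	smb_idpool_destructor(&pool);
 */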
456 
457 /*
458  * Initialize the llist delete queue object cache.
459  */
460 void
461 smb_llist_init(void)
462 {
463 	if (smb_llist_initialized)
464 		return;
465 
466 	smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
467 	    sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
468 
469 	smb_llist_initialized = B_TRUE;
470 }
471 
472 /*
473  * Destroy the llist delete queue object cache.
474  */
475 void
476 smb_llist_fini(void)
477 {
478 	if (!smb_llist_initialized)
479 		return;
480 
481 	kmem_cache_destroy(smb_dtor_cache);
482 	smb_llist_initialized = B_FALSE;
483 }
484 
485 /*
486  * smb_llist_constructor
487  *
488  * This function initializes a locked list.
489  */
490 void
491 smb_llist_constructor(
492     smb_llist_t	*ll,
493     size_t	size,
494     size_t	offset)
495 {
496 	rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
497 	mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
498 	list_create(&ll->ll_list, size, offset);
499 	list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
500 	    offsetof(smb_dtor_t, dt_lnd));
501 	ll->ll_count = 0;
502 	ll->ll_wrop = 0;
503 	ll->ll_deleteq_count = 0;
504 	ll->ll_flushing = B_FALSE;
505 }
506 
507 /*
508  * Flush the delete queue and destroy a locked list.
509  */
510 void
511 smb_llist_destructor(
512     smb_llist_t	*ll)
513 {
514 	smb_llist_flush(ll);
515 
516 	ASSERT(ll->ll_count == 0);
517 	ASSERT(ll->ll_deleteq_count == 0);
518 
519 	rw_destroy(&ll->ll_lock);
520 	list_destroy(&ll->ll_list);
521 	list_destroy(&ll->ll_deleteq);
522 	mutex_destroy(&ll->ll_mutex);
523 }
524 
525 /*
526  * Post an object to the delete queue.  The delete queue will be processed
527  * during list exit or list destruction.  Objects are often posted for
528  * deletion during list iteration (while the list is locked) but that is
529  * not required, and an object can be posted at any time.
530  */
531 void
532 smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
533 {
534 	smb_dtor_t	*dtor;
535 
536 	ASSERT((object != NULL) && (dtorproc != NULL));
537 
538 	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
539 	bzero(dtor, sizeof (smb_dtor_t));
540 	dtor->dt_magic = SMB_DTOR_MAGIC;
541 	dtor->dt_object = object;
542 	dtor->dt_proc = dtorproc;
543 
544 	mutex_enter(&ll->ll_mutex);
545 	list_insert_tail(&ll->ll_deleteq, dtor);
546 	++ll->ll_deleteq_count;
547 	mutex_exit(&ll->ll_mutex);
548 }
549 
550 /*
551  * Exit the list lock and process the delete queue.
552  */
553 void
554 smb_llist_exit(smb_llist_t *ll)
555 {
556 	rw_exit(&ll->ll_lock);
557 	smb_llist_flush(ll);
558 }
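
/*
 * Deferred-deletion sketch (illustrative; obj_is_stale and obj_dtor are
 * hypothetical).  Objects are posted while the list lock is held and
 * destroyed by the flush that runs when the lock is exited; the
 * smb_dtorproc_t is expected to remove the object from the list before
 * freeing it.
 *
 *	rw_enter(&ll->ll_lock, RW_READER);
 *	for (obj = list_head(&ll->ll_list); obj != NULL;
 *	    obj = list_next(&ll->ll_list, obj)) {
 *		if (obj_is_stale(obj))
 *			smb_llist_post(ll, obj, obj_dtor);
 *	}
 *	smb_llist_exit(ll);	exits ll_lock, then flushes the queue
 */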
559 
560 /*
561  * Flush the list delete queue.  The mutex is dropped across the destructor
562  * call in case this leads to additional objects being posted to the delete
563  * queue.
564  */
565 void
566 smb_llist_flush(smb_llist_t *ll)
567 {
568 	smb_dtor_t    *dtor;
569 
570 	mutex_enter(&ll->ll_mutex);
571 	if (ll->ll_flushing) {
572 		mutex_exit(&ll->ll_mutex);
573 		return;
574 	}
575 	ll->ll_flushing = B_TRUE;
576 
577 	dtor = list_head(&ll->ll_deleteq);
578 	while (dtor != NULL) {
579 		SMB_DTOR_VALID(dtor);
580 		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
581 		list_remove(&ll->ll_deleteq, dtor);
582 		--ll->ll_deleteq_count;
583 		mutex_exit(&ll->ll_mutex);
584 
585 		dtor->dt_proc(dtor->dt_object);
586 
587 		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
588 		kmem_cache_free(smb_dtor_cache, dtor);
589 		mutex_enter(&ll->ll_mutex);
590 		dtor = list_head(&ll->ll_deleteq);
591 	}
592 	ll->ll_flushing = B_FALSE;
593 
594 	mutex_exit(&ll->ll_mutex);
595 }
596 
597 /*
598  * smb_llist_upgrade
599  *
600  * This function tries to upgrade the lock of the locked list. It assumes the
601  * lock has already been entered in RW_READER mode. It first tries using the
602  * Solaris function rw_tryupgrade(). If that call fails the lock is released
603  * and reentered in RW_WRITER mode. In that last case a window is opened during
604  * which the contents of the list may have changed. The return code indicates
605  * whether or not the list was modified when the lock was exited.
606  */
607 int
608 smb_llist_upgrade(smb_llist_t *ll)
609 {
610 	uint64_t	wrop;
611 
612 	if (rw_tryupgrade(&ll->ll_lock) != 0) {
613 		return (0);
614 	}
615 	wrop = ll->ll_wrop;
616 	rw_exit(&ll->ll_lock);
617 	rw_enter(&ll->ll_lock, RW_WRITER);
618 	return (wrop != ll->ll_wrop);
619 }
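
/*
 * Callers that cached state under the reader lock must re-validate when
 * smb_llist_upgrade() reports that writers intervened:
 *
 *	if (smb_llist_upgrade(ll) != 0) {
 *		... the list changed while unlocked; redo the lookup ...
 *	}
 */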
620 
621 /*
622  * smb_llist_insert_head
623  *
624  * This function inserts the object passed at the beginning of the list. This
625  * function assumes the lock of the list has already been entered.
626  */
627 void
628 smb_llist_insert_head(
629     smb_llist_t	*ll,
630     void	*obj)
631 {
632 	list_insert_head(&ll->ll_list, obj);
633 	++ll->ll_wrop;
634 	++ll->ll_count;
635 }
636 
637 /*
638  * smb_llist_insert_tail
639  *
640  * This function appends the object passed to the list. This function assumes
641  * the lock of the list has already been entered.
642  *
643  */
644 void
645 smb_llist_insert_tail(
646     smb_llist_t	*ll,
647     void	*obj)
648 {
649 	list_insert_tail(&ll->ll_list, obj);
650 	++ll->ll_wrop;
651 	++ll->ll_count;
652 }
653 
654 /*
655  * smb_llist_remove
656  *
657  * This function removes the object passed from the list. This function assumes
658  * the lock of the list has already been entered.
659  */
660 void
661 smb_llist_remove(
662     smb_llist_t	*ll,
663     void	*obj)
664 {
665 	list_remove(&ll->ll_list, obj);
666 	++ll->ll_wrop;
667 	--ll->ll_count;
668 }
669 
670 /*
671  * smb_llist_get_count
672  *
673  * This function returns the number of elements in the specified list.
674  */
675 uint32_t
676 smb_llist_get_count(
677     smb_llist_t *ll)
678 {
679 	return (ll->ll_count);
680 }
681 
682 /*
683  * smb_slist_constructor
684  *
685  * Synchronized list constructor.
686  */
687 void
688 smb_slist_constructor(
689     smb_slist_t	*sl,
690     size_t	size,
691     size_t	offset)
692 {
693 	mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
694 	cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
695 	list_create(&sl->sl_list, size, offset);
696 	sl->sl_count = 0;
697 	sl->sl_waiting = B_FALSE;
698 }
699 
700 /*
701  * smb_slist_destructor
702  *
703  * Synchronized list destructor.
704  */
705 void
706 smb_slist_destructor(
707     smb_slist_t	*sl)
708 {
709 	VERIFY(sl->sl_count == 0);
710 
711 	mutex_destroy(&sl->sl_mutex);
712 	cv_destroy(&sl->sl_cv);
713 	list_destroy(&sl->sl_list);
714 }
715 
716 /*
717  * smb_slist_insert_head
718  *
719  * This function inserts the object passed at the beginning of the list.
720  */
721 void
722 smb_slist_insert_head(
723     smb_slist_t	*sl,
724     void	*obj)
725 {
726 	mutex_enter(&sl->sl_mutex);
727 	list_insert_head(&sl->sl_list, obj);
728 	++sl->sl_count;
729 	mutex_exit(&sl->sl_mutex);
730 }
731 
732 /*
733  * smb_slist_insert_tail
734  *
735  * This function appends the object passed to the list.
736  */
737 void
738 smb_slist_insert_tail(
739     smb_slist_t	*sl,
740     void	*obj)
741 {
742 	mutex_enter(&sl->sl_mutex);
743 	list_insert_tail(&sl->sl_list, obj);
744 	++sl->sl_count;
745 	mutex_exit(&sl->sl_mutex);
746 }
747 
748 /*
749  * smb_slist_remove
750  *
751  * This function removes the object passed by the caller from the list.
752  */
753 void
754 smb_slist_remove(
755     smb_slist_t	*sl,
756     void	*obj)
757 {
758 	mutex_enter(&sl->sl_mutex);
759 	list_remove(&sl->sl_list, obj);
760 	if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
761 		sl->sl_waiting = B_FALSE;
762 		cv_broadcast(&sl->sl_cv);
763 	}
764 	mutex_exit(&sl->sl_mutex);
765 }
766 
767 /*
768  * smb_slist_move_tail
769  *
770  * This function transfers all the contents of the synchronized list to the
771  * list_t provided. It returns the number of objects transferred.
772  */
773 uint32_t
774 smb_slist_move_tail(
775     list_t	*lst,
776     smb_slist_t	*sl)
777 {
778 	uint32_t	rv;
779 
780 	mutex_enter(&sl->sl_mutex);
781 	rv = sl->sl_count;
782 	if (sl->sl_count) {
783 		list_move_tail(lst, &sl->sl_list);
784 		sl->sl_count = 0;
785 		if (sl->sl_waiting) {
786 			sl->sl_waiting = B_FALSE;
787 			cv_broadcast(&sl->sl_cv);
788 		}
789 	}
790 	mutex_exit(&sl->sl_mutex);
791 	return (rv);
792 }
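
/*
 * Drain sketch (illustrative, assuming the list_remove_head(9F)
 * helper): move everything onto a private list_t so the objects can be
 * processed without holding sl_mutex.
 *
 *	list_t	local;
 *
 *	list_create(&local, sl->sl_list.list_size,
 *	    sl->sl_list.list_offset);
 *	if (smb_slist_move_tail(&local, sl) != 0) {
 *		while ((obj = list_remove_head(&local)) != NULL)
 *			... process obj ...
 *	}
 *	list_destroy(&local);
 */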
793 
794 /*
795  * smb_slist_obj_move
796  *
797  * This function moves an object from one list to the end of the other list. It
798  * assumes the mutex of each list has been entered.
799  */
800 void
801 smb_slist_obj_move(
802     smb_slist_t	*dst,
803     smb_slist_t	*src,
804     void	*obj)
805 {
806 	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
807 	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);
808 
809 	list_remove(&src->sl_list, obj);
810 	list_insert_tail(&dst->sl_list, obj);
811 	dst->sl_count++;
812 	src->sl_count--;
813 	if ((src->sl_count == 0) && (src->sl_waiting)) {
814 		src->sl_waiting = B_FALSE;
815 		cv_broadcast(&src->sl_cv);
816 	}
817 }
818 
819 /*
820  * smb_slist_wait_for_empty
821  *
822  * This function waits for a list to be emptied.
823  */
824 void
825 smb_slist_wait_for_empty(
826     smb_slist_t	*sl)
827 {
828 	mutex_enter(&sl->sl_mutex);
829 	while (sl->sl_count) {
830 		sl->sl_waiting = B_TRUE;
831 		cv_wait(&sl->sl_cv, &sl->sl_mutex);
832 	}
833 	mutex_exit(&sl->sl_mutex);
834 }
835 
836 /*
837  * smb_slist_exit
838  *
839  * This function exits the mutex of the list and signals the condition variable
840  * if the list is empty.
841  */
842 void
843 smb_slist_exit(smb_slist_t *sl)
844 {
845 	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
846 		sl->sl_waiting = B_FALSE;
847 		cv_broadcast(&sl->sl_cv);
848 	}
849 	mutex_exit(&sl->sl_mutex);
850 }
851 
852 /*
853  * smb_thread_entry_point
854  *
855  * Common entry point for all the threads created through smb_thread_start.
856  * The state of the thread is set to "running" at the beginning and moved to
857  * "exiting" just before calling thread_exit(). The condition variable is
858  * also signaled.
859  */
860 static void
861 smb_thread_entry_point(
862     smb_thread_t	*thread)
863 {
864 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
865 	mutex_enter(&thread->sth_mtx);
866 	ASSERT(thread->sth_state == SMB_THREAD_STATE_STARTING);
867 	thread->sth_th = curthread;
868 	thread->sth_did = thread->sth_th->t_did;
869 
870 	if (!thread->sth_kill) {
871 		thread->sth_state = SMB_THREAD_STATE_RUNNING;
872 		cv_signal(&thread->sth_cv);
873 		mutex_exit(&thread->sth_mtx);
874 		thread->sth_ep(thread, thread->sth_ep_arg);
875 		mutex_enter(&thread->sth_mtx);
876 	}
877 	thread->sth_th = NULL;
878 	thread->sth_state = SMB_THREAD_STATE_EXITING;
879 	cv_broadcast(&thread->sth_cv);
880 	mutex_exit(&thread->sth_mtx);
881 	thread_exit();
882 }
883 
884 /*
885  * smb_thread_init
886  */
887 void
888 smb_thread_init(
889     smb_thread_t	*thread,
890     char		*name,
891     smb_thread_ep_t	ep,
892     void		*ep_arg,
893     smb_thread_aw_t	aw,
894     void		*aw_arg)
895 {
896 	ASSERT(thread->sth_magic != SMB_THREAD_MAGIC);
897 
898 	bzero(thread, sizeof (*thread));
899 
900 	(void) strlcpy(thread->sth_name, name, sizeof (thread->sth_name));
901 	thread->sth_ep = ep;
902 	thread->sth_ep_arg = ep_arg;
903 	thread->sth_aw = aw;
904 	thread->sth_aw_arg = aw_arg;
905 	thread->sth_state = SMB_THREAD_STATE_EXITED;
906 	mutex_init(&thread->sth_mtx, NULL, MUTEX_DEFAULT, NULL);
907 	cv_init(&thread->sth_cv, NULL, CV_DEFAULT, NULL);
908 	thread->sth_magic = SMB_THREAD_MAGIC;
909 }
910 
911 /*
912  * smb_thread_destroy
913  */
914 void
915 smb_thread_destroy(
916     smb_thread_t	*thread)
917 {
918 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
919 	ASSERT(thread->sth_state == SMB_THREAD_STATE_EXITED);
920 	thread->sth_magic = 0;
921 	mutex_destroy(&thread->sth_mtx);
922 	cv_destroy(&thread->sth_cv);
923 }
924 
925 /*
926  * smb_thread_start
927  *
928  * This function starts a thread with the parameters provided. It waits until
929  * the state of the thread has been moved to running.
930  */
931 /*ARGSUSED*/
932 int
933 smb_thread_start(
934     smb_thread_t	*thread)
935 {
936 	int		rc = 0;
937 	kthread_t	*tmpthread;
938 
939 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
940 
941 	mutex_enter(&thread->sth_mtx);
942 	switch (thread->sth_state) {
943 	case SMB_THREAD_STATE_EXITED:
944 		thread->sth_state = SMB_THREAD_STATE_STARTING;
945 		mutex_exit(&thread->sth_mtx);
946 		tmpthread = thread_create(NULL, 0, smb_thread_entry_point,
947 		    thread, 0, &p0, TS_RUN, minclsyspri);
948 		ASSERT(tmpthread != NULL);
949 		mutex_enter(&thread->sth_mtx);
950 		while (thread->sth_state == SMB_THREAD_STATE_STARTING)
951 			cv_wait(&thread->sth_cv, &thread->sth_mtx);
952 		if (thread->sth_state != SMB_THREAD_STATE_RUNNING)
953 			rc = -1;
954 		break;
955 	default:
956 		ASSERT(0);
957 		rc = -1;
958 		break;
959 	}
960 	mutex_exit(&thread->sth_mtx);
961 	return (rc);
962 }
963 
964 /*
965  * smb_thread_stop
966  *
967  * This function signals a thread to kill itself and waits until the "exiting"
968  * state has been reached.
969  */
970 void
971 smb_thread_stop(
972     smb_thread_t	*thread)
973 {
974 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
975 
976 	mutex_enter(&thread->sth_mtx);
977 	switch (thread->sth_state) {
978 	case SMB_THREAD_STATE_RUNNING:
979 	case SMB_THREAD_STATE_STARTING:
980 		if (!thread->sth_kill) {
981 			thread->sth_kill = B_TRUE;
982 			if (thread->sth_aw)
983 				thread->sth_aw(thread, thread->sth_aw_arg);
984 			cv_broadcast(&thread->sth_cv);
985 			while (thread->sth_state != SMB_THREAD_STATE_EXITING)
986 				cv_wait(&thread->sth_cv, &thread->sth_mtx);
987 			mutex_exit(&thread->sth_mtx);
988 			thread_join(thread->sth_did);
989 			mutex_enter(&thread->sth_mtx);
990 			thread->sth_state = SMB_THREAD_STATE_EXITED;
991 			thread->sth_did = 0;
992 			thread->sth_kill = B_FALSE;
993 			cv_broadcast(&thread->sth_cv);
994 			break;
995 		}
996 		/*FALLTHRU*/
997 
998 	case SMB_THREAD_STATE_EXITING:
999 		if (thread->sth_kill) {
1000 			while (thread->sth_state != SMB_THREAD_STATE_EXITED)
1001 				cv_wait(&thread->sth_cv, &thread->sth_mtx);
1002 		} else {
1003 			thread->sth_state = SMB_THREAD_STATE_EXITED;
1004 			thread->sth_did = 0;
1005 		}
1006 		break;
1007 
1008 	case SMB_THREAD_STATE_EXITED:
1009 		break;
1010 
1011 	default:
1012 		ASSERT(0);
1013 		break;
1014 	}
1015 	mutex_exit(&thread->sth_mtx);
1016 }
1017 
1018 /*
1019  * smb_thread_signal
1020  *
1021  * This function signals a thread.
1022  */
1023 void
1024 smb_thread_signal(
1025     smb_thread_t	*thread)
1026 {
1027 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1028 
1029 	mutex_enter(&thread->sth_mtx);
1030 	switch (thread->sth_state) {
1031 	case SMB_THREAD_STATE_RUNNING:
1032 		if (thread->sth_aw)
1033 			thread->sth_aw(thread, thread->sth_aw_arg);
1034 		cv_signal(&thread->sth_cv);
1035 		break;
1036 
1037 	default:
1038 		break;
1039 	}
1040 	mutex_exit(&thread->sth_mtx);
1041 }
1042 
1043 boolean_t
1044 smb_thread_continue(smb_thread_t *thread)
1045 {
1046 	boolean_t result;
1047 
1048 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1049 
1050 	mutex_enter(&thread->sth_mtx);
1051 	result = smb_thread_continue_timedwait_locked(thread, 0);
1052 	mutex_exit(&thread->sth_mtx);
1053 
1054 	return (result);
1055 }
1056 
1057 boolean_t
1058 smb_thread_continue_nowait(smb_thread_t *thread)
1059 {
1060 	boolean_t result;
1061 
1062 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1063 
1064 	mutex_enter(&thread->sth_mtx);
1065 	/*
1066 	 * Setting ticks=-1 requests a non-blocking check.  We will
1067 	 * still block if the thread is in "suspend" state.
1068 	 */
1069 	result = smb_thread_continue_timedwait_locked(thread, -1);
1070 	mutex_exit(&thread->sth_mtx);
1071 
1072 	return (result);
1073 }
1074 
1075 boolean_t
1076 smb_thread_continue_timedwait(smb_thread_t *thread, int seconds)
1077 {
1078 	boolean_t result;
1079 
1080 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1081 
1082 	mutex_enter(&thread->sth_mtx);
1083 	result = smb_thread_continue_timedwait_locked(thread,
1084 	    SEC_TO_TICK(seconds));
1085 	mutex_exit(&thread->sth_mtx);
1086 
1087 	return (result);
1088 }
1089 
1090 /*
1091  * smb_thread_continue_timedwait_locked
1092  *
1093  * Internal only.  ticks == -1 means don't block, ticks == 0 means wait
1094  * indefinitely.
1095  */
1096 static boolean_t
1097 smb_thread_continue_timedwait_locked(smb_thread_t *thread, int ticks)
1098 {
1099 	boolean_t	result;
1100 
1101 	/* -1 means don't block */
1102 	if (ticks != -1 && !thread->sth_kill) {
1103 		if (ticks == 0) {
1104 			cv_wait(&thread->sth_cv, &thread->sth_mtx);
1105 		} else {
1106 			(void) cv_reltimedwait(&thread->sth_cv,
1107 			    &thread->sth_mtx, (clock_t)ticks, TR_CLOCK_TICK);
1108 		}
1109 	}
1110 	result = (thread->sth_kill == 0);
1111 
1112 	return (result);
1113 }
1114 
1115 void
1116 smb_thread_set_awaken(smb_thread_t *thread, smb_thread_aw_t new_aw_fn,
1117     void *new_aw_arg)
1118 {
1119 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1120 
1121 	mutex_enter(&thread->sth_mtx);
1122 	thread->sth_aw = new_aw_fn;
1123 	thread->sth_aw_arg = new_aw_arg;
1124 	mutex_exit(&thread->sth_mtx);
1125 }
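
/*
 * Worker-thread skeleton (illustrative; my_worker, my_arg and t are
 * hypothetical) showing the usual pairing of the lifecycle calls above:
 *
 *	static void
 *	my_worker(smb_thread_t *thread, void *arg)
 *	{
 *		while (smb_thread_continue(thread)) {
 *			... do one unit of work ...
 *		}
 *	}
 *
 *	smb_thread_init(&t, "my_worker", my_worker, my_arg, NULL, NULL);
 *	(void) smb_thread_start(&t);	returns once the thread is RUNNING
 *	...
 *	smb_thread_stop(&t);		sets sth_kill and waits for exit
 *	smb_thread_destroy(&t);
 */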
1126 
1127 /*
1128  * smb_rwx_init
1129  */
1130 void
1131 smb_rwx_init(
1132     smb_rwx_t	*rwx)
1133 {
1134 	bzero(rwx, sizeof (smb_rwx_t));
1135 	cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
1136 	mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
1137 	rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
1138 }
1139 
1140 /*
1141  * smb_rwx_destroy
1142  */
1143 void
1144 smb_rwx_destroy(
1145     smb_rwx_t	*rwx)
1146 {
1147 	mutex_destroy(&rwx->rwx_mutex);
1148 	cv_destroy(&rwx->rwx_cv);
1149 	rw_destroy(&rwx->rwx_lock);
1150 }
1151 
1152 /*
1153  * smb_rwx_rwexit
1154  */
1155 void
1156 smb_rwx_rwexit(
1157     smb_rwx_t	*rwx)
1158 {
1159 	if (rw_write_held(&rwx->rwx_lock)) {
1160 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1161 		mutex_enter(&rwx->rwx_mutex);
1162 		if (rwx->rwx_waiting) {
1163 			rwx->rwx_waiting = B_FALSE;
1164 			cv_broadcast(&rwx->rwx_cv);
1165 		}
1166 		mutex_exit(&rwx->rwx_mutex);
1167 	}
1168 	rw_exit(&rwx->rwx_lock);
1169 }
1170 
1171 /*
1172  * smb_rwx_rwupgrade
1173  */
1174 krw_t
1175 smb_rwx_rwupgrade(
1176     smb_rwx_t	*rwx)
1177 {
1178 	if (rw_write_held(&rwx->rwx_lock)) {
1179 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1180 		return (RW_WRITER);
1181 	}
1182 	if (!rw_tryupgrade(&rwx->rwx_lock)) {
1183 		rw_exit(&rwx->rwx_lock);
1184 		rw_enter(&rwx->rwx_lock, RW_WRITER);
1185 	}
1186 	return (RW_READER);
1187 }
1188 
1189 /*
1190  * smb_rwx_rwdowngrade
1191  */
1192 void
1193 smb_rwx_rwdowngrade(
1194     smb_rwx_t	*rwx,
1195     krw_t	mode)
1196 {
1197 	ASSERT(rw_write_held(&rwx->rwx_lock));
1198 	ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1199 
1200 	if (mode == RW_WRITER) {
1201 		return;
1202 	}
1203 	ASSERT(mode == RW_READER);
1204 	mutex_enter(&rwx->rwx_mutex);
1205 	if (rwx->rwx_waiting) {
1206 		rwx->rwx_waiting = B_FALSE;
1207 		cv_broadcast(&rwx->rwx_cv);
1208 	}
1209 	mutex_exit(&rwx->rwx_mutex);
1210 	rw_downgrade(&rwx->rwx_lock);
1211 }
1212 
1213 /*
1214  * smb_rwx_wait
1215  *
1216  * This function assumes the smb_rwx lock was enter in RW_READER or RW_WRITER
1217  * mode. It will:
1218  *
1219  *	1) release the lock and save its current mode.
1220  *	2) wait until the condition variable is signaled. This can happen
1221  *	   for two reasons: when a writer releases the lock or when the
1222  *	   timeout (if provided) expires.
1223  *	3) re-acquire the lock in the mode saved in (1).
1224  */
1225 int
1226 smb_rwx_rwwait(
1227     smb_rwx_t	*rwx,
1228     clock_t	timeout)
1229 {
1230 	int	rc = 1;	/* default: treat an early wakeup as signaled */
1231 	krw_t	mode;
1232 
1233 	mutex_enter(&rwx->rwx_mutex);
1234 	rwx->rwx_waiting = B_TRUE;
1235 	mutex_exit(&rwx->rwx_mutex);
1236 
1237 	if (rw_write_held(&rwx->rwx_lock)) {
1238 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1239 		mode = RW_WRITER;
1240 	} else {
1241 		ASSERT(rw_read_held(&rwx->rwx_lock));
1242 		mode = RW_READER;
1243 	}
1244 	rw_exit(&rwx->rwx_lock);
1245 
1246 	mutex_enter(&rwx->rwx_mutex);
1247 	if (rwx->rwx_waiting) {
1248 		if (timeout == -1) {
1249 			rc = 1;
1250 			cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
1251 		} else {
1252 			rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
1253 			    timeout, TR_CLOCK_TICK);
1254 		}
1255 	}
1256 	mutex_exit(&rwx->rwx_mutex);
1257 
1258 	rw_enter(&rwx->rwx_lock, mode);
1259 	return (rc);
1260 }
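
/*
 * Wait/wakeup sketch (illustrative; state_is_ready is hypothetical and
 * the lock is assumed to be held on entry).  The waiter re-checks its
 * predicate because smb_rwx_rwwait() returns -1 on timeout, and writers
 * wake waiters implicitly via smb_rwx_rwexit().
 *
 *	while (!state_is_ready(obj)) {
 *		if (smb_rwx_rwwait(&obj->rwx, SEC_TO_TICK(1)) == -1)
 *			break;		timed out
 *	}
 */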
1261 
1262 /*
1263  * SMB ID mapping
1264  *
1265  * Solaris ID mapping service (aka Winchester) works with domain SIDs
1266  * and RIDs where domain SIDs are in string format. CIFS service works
1267  * with binary SIDs understandable by CIFS clients. A layer of SMB ID
1268  * mapping functions is implemented to hide the SID conversion details
1269  * and also hide the handling of array of batch mapping requests.
1270  *
1271  * IMPORTANT NOTE: The Winchester API requires a zone. Because the CIFS
1272  * server currently runs only in the global zone, the global zone is
1273  * specified. This needs to be fixed when the CIFS server supports zones.
1274  */
1275 
1276 static int smb_idmap_batch_binsid(smb_idmap_batch_t *sib);
1277 
1278 /*
1279  * smb_idmap_getid
1280  *
1281  * Maps the given Windows SID to a Solaris ID using the
1282  * simple mapping API.
1283  */
1284 idmap_stat
1285 smb_idmap_getid(smb_sid_t *sid, uid_t *id, int *idtype)
1286 {
1287 	smb_idmap_t sim;
1288 	char sidstr[SMB_SID_STRSZ];
1289 
1290 	smb_sid_tostr(sid, sidstr);
1291 	if (smb_sid_splitstr(sidstr, &sim.sim_rid) != 0)
1292 		return (IDMAP_ERR_SID);
1293 	sim.sim_domsid = sidstr;
1294 	sim.sim_id = id;
1295 
1296 	switch (*idtype) {
1297 	case SMB_IDMAP_USER:
1298 		sim.sim_stat = kidmap_getuidbysid(global_zone, sim.sim_domsid,
1299 		    sim.sim_rid, sim.sim_id);
1300 		break;
1301 
1302 	case SMB_IDMAP_GROUP:
1303 		sim.sim_stat = kidmap_getgidbysid(global_zone, sim.sim_domsid,
1304 		    sim.sim_rid, sim.sim_id);
1305 		break;
1306 
1307 	case SMB_IDMAP_UNKNOWN:
1308 		sim.sim_stat = kidmap_getpidbysid(global_zone, sim.sim_domsid,
1309 		    sim.sim_rid, sim.sim_id, &sim.sim_idtype);
1310 		break;
1311 
1312 	default:
1313 		ASSERT(0);
1314 		return (IDMAP_ERR_ARG);
1315 	}
1316 
1317 	*idtype = sim.sim_idtype;
1318 
1319 	return (sim.sim_stat);
1320 }
1321 
1322 /*
1323  * smb_idmap_getsid
1324  *
1325  * Maps the given Solaris ID to a Windows SID using the
1326  * simple mapping API.
1327  */
1328 idmap_stat
1329 smb_idmap_getsid(uid_t id, int idtype, smb_sid_t **sid)
1330 {
1331 	smb_idmap_t sim;
1332 
1333 	switch (idtype) {
1334 	case SMB_IDMAP_USER:
1335 		sim.sim_stat = kidmap_getsidbyuid(global_zone, id,
1336 		    (const char **)&sim.sim_domsid, &sim.sim_rid);
1337 		break;
1338 
1339 	case SMB_IDMAP_GROUP:
1340 		sim.sim_stat = kidmap_getsidbygid(global_zone, id,
1341 		    (const char **)&sim.sim_domsid, &sim.sim_rid);
1342 		break;
1343 
1344 	case SMB_IDMAP_EVERYONE:
1345 		/* Everyone S-1-1-0 */
1346 		sim.sim_domsid = "S-1-1";
1347 		sim.sim_rid = 0;
1348 		sim.sim_stat = IDMAP_SUCCESS;
1349 		break;
1350 
1351 	default:
1352 		ASSERT(0);
1353 		return (IDMAP_ERR_ARG);
1354 	}
1355 
1356 	if (sim.sim_stat != IDMAP_SUCCESS)
1357 		return (sim.sim_stat);
1358 
1359 	if (sim.sim_domsid == NULL)
1360 		return (IDMAP_ERR_NOMAPPING);
1361 
1362 	sim.sim_sid = smb_sid_fromstr(sim.sim_domsid);
1363 	if (sim.sim_sid == NULL)
1364 		return (IDMAP_ERR_INTERNAL);
1365 
1366 	*sid = smb_sid_splice(sim.sim_sid, sim.sim_rid);
1367 	smb_sid_free(sim.sim_sid);
1368 	if (*sid == NULL)
1369 		sim.sim_stat = IDMAP_ERR_INTERNAL;
1370 
1371 	return (sim.sim_stat);
1372 }
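
/*
 * Simple-mapping sketch (illustrative; uid is a hypothetical uid_t):
 * map a Solaris uid to a binary SID and free it when done.
 *
 *	smb_sid_t	*sid;
 *
 *	if (smb_idmap_getsid(uid, SMB_IDMAP_USER, &sid) == IDMAP_SUCCESS) {
 *		... use sid ...
 *		smb_sid_free(sid);
 *	}
 */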
1373 
1374 /*
1375  * smb_idmap_batch_create
1376  *
1377  * Creates and initializes the context for batch ID mapping.
1378  */
1379 idmap_stat
1380 smb_idmap_batch_create(smb_idmap_batch_t *sib, uint16_t nmap, int flags)
1381 {
1382 	ASSERT(sib);
1383 
1384 	bzero(sib, sizeof (smb_idmap_batch_t));
1385 
1386 	sib->sib_idmaph = kidmap_get_create(global_zone);
1387 
1388 	sib->sib_flags = flags;
1389 	sib->sib_nmap = nmap;
1390 	sib->sib_size = nmap * sizeof (smb_idmap_t);
1391 	sib->sib_maps = kmem_zalloc(sib->sib_size, KM_SLEEP);
1392 
1393 	return (IDMAP_SUCCESS);
1394 }
1395 
1396 /*
1397  * smb_idmap_batch_destroy
1398  *
1399  * Frees the batch ID mapping context.
1400  * If ID mapping is Solaris -> Windows it frees memories
1401  * allocated for binary SIDs.
1402  */
1403 void
1404 smb_idmap_batch_destroy(smb_idmap_batch_t *sib)
1405 {
1406 	char *domsid;
1407 	int i;
1408 
1409 	ASSERT(sib);
1410 	ASSERT(sib->sib_maps);
1411 
1412 	if (sib->sib_idmaph)
1413 		kidmap_get_destroy(sib->sib_idmaph);
1414 
1415 	if (sib->sib_flags & SMB_IDMAP_ID2SID) {
1416 		/*
1417 		 * SIDs are allocated only when mapping
1418 		 * UID/GID to SIDs
1419 		 */
1420 		for (i = 0; i < sib->sib_nmap; i++)
1421 			smb_sid_free(sib->sib_maps[i].sim_sid);
1422 	} else if (sib->sib_flags & SMB_IDMAP_SID2ID) {
1423 		/*
1424 		 * SID prefixes are allocated only when mapping
1425 		 * SIDs to UID/GID
1426 		 */
1427 		for (i = 0; i < sib->sib_nmap; i++) {
1428 			domsid = sib->sib_maps[i].sim_domsid;
1429 			if (domsid)
1430 				smb_mem_free(domsid);
1431 		}
1432 	}
1433 
1434 	if (sib->sib_size && sib->sib_maps)
1435 		kmem_free(sib->sib_maps, sib->sib_size);
1436 }
1437 
1438 /*
1439  * smb_idmap_batch_getid
1440  *
1441  * Queue a request to map the given SID to a UID or GID.
1442  *
1443  * sim->sim_id should point to variable that's supposed to
1444  * hold the returned UID/GID. This needs to be setup by caller
1445  * of this function.
1446  *
1447  * If requested ID type is known, it's passed as 'idtype',
1448  * if it's unknown it'll be returned in sim->sim_idtype.
1449  */
1450 idmap_stat
1451 smb_idmap_batch_getid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
1452     smb_sid_t *sid, int idtype)
1453 {
1454 	char strsid[SMB_SID_STRSZ];
1455 	idmap_stat idm_stat;
1456 
1457 	ASSERT(idmaph);
1458 	ASSERT(sim);
1459 	ASSERT(sid);
1460 
1461 	smb_sid_tostr(sid, strsid);
1462 	if (smb_sid_splitstr(strsid, &sim->sim_rid) != 0)
1463 		return (IDMAP_ERR_SID);
1464 	sim->sim_domsid = smb_mem_strdup(strsid);
1465 
1466 	switch (idtype) {
1467 	case SMB_IDMAP_USER:
1468 		idm_stat = kidmap_batch_getuidbysid(idmaph, sim->sim_domsid,
1469 		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
1470 		break;
1471 
1472 	case SMB_IDMAP_GROUP:
1473 		idm_stat = kidmap_batch_getgidbysid(idmaph, sim->sim_domsid,
1474 		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
1475 		break;
1476 
1477 	case SMB_IDMAP_UNKNOWN:
1478 		idm_stat = kidmap_batch_getpidbysid(idmaph, sim->sim_domsid,
1479 		    sim->sim_rid, sim->sim_id, &sim->sim_idtype,
1480 		    &sim->sim_stat);
1481 		break;
1482 
1483 	default:
1484 		ASSERT(0);
1485 		return (IDMAP_ERR_ARG);
1486 	}
1487 
1488 	return (idm_stat);
1489 }
1490 
1491 /*
1492  * smb_idmap_batch_getsid
1493  *
1494  * Queue a request to map the given UID/GID to a SID.
1495  *
1496  * sim->sim_domsid and sim->sim_rid will contain the mapping
1497  * result upon successful process of the batched request.
1498  */
1499 idmap_stat
1500 smb_idmap_batch_getsid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
1501     uid_t id, int idtype)
1502 {
1503 	idmap_stat idm_stat;
1504 
1505 	switch (idtype) {
1506 	case SMB_IDMAP_USER:
1507 		idm_stat = kidmap_batch_getsidbyuid(idmaph, id,
1508 		    (const char **)&sim->sim_domsid, &sim->sim_rid,
1509 		    &sim->sim_stat);
1510 		break;
1511 
1512 	case SMB_IDMAP_GROUP:
1513 		idm_stat = kidmap_batch_getsidbygid(idmaph, id,
1514 		    (const char **)&sim->sim_domsid, &sim->sim_rid,
1515 		    &sim->sim_stat);
1516 		break;
1517 
1518 	case SMB_IDMAP_OWNERAT:
1519 		/* Current Owner S-1-5-32-766 */
1520 		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
1521 		sim->sim_rid = SECURITY_CURRENT_OWNER_RID;
1522 		sim->sim_stat = IDMAP_SUCCESS;
1523 		idm_stat = IDMAP_SUCCESS;
1524 		break;
1525 
1526 	case SMB_IDMAP_GROUPAT:
1527 		/* Current Group S-1-5-32-767 */
1528 		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
1529 		sim->sim_rid = SECURITY_CURRENT_GROUP_RID;
1530 		sim->sim_stat = IDMAP_SUCCESS;
1531 		idm_stat = IDMAP_SUCCESS;
1532 		break;
1533 
1534 	case SMB_IDMAP_EVERYONE:
1535 		/* Everyone S-1-1-0 */
1536 		sim->sim_domsid = NT_WORLD_AUTH_SIDSTR;
1537 		sim->sim_rid = 0;
1538 		sim->sim_stat = IDMAP_SUCCESS;
1539 		idm_stat = IDMAP_SUCCESS;
1540 		break;
1541 
1542 	default:
1543 		ASSERT(0);
1544 		return (IDMAP_ERR_ARG);
1545 	}
1546 
1547 	return (idm_stat);
1548 }
1549 
1550 /*
1551  * smb_idmap_batch_binsid
1552  *
1553  * Convert (domain SID string, RID) pairs to binary SIDs.
1554  *
1555  * Returns 0 if successful and non-zero upon failure.
1556  */
1557 static int
1558 smb_idmap_batch_binsid(smb_idmap_batch_t *sib)
1559 {
1560 	smb_sid_t *sid;
1561 	smb_idmap_t *sim;
1562 	int i;
1563 
1564 	if (sib->sib_flags & SMB_IDMAP_SID2ID)
1565 		/* This operation is not required */
1566 		return (0);
1567 
1568 	sim = sib->sib_maps;
1569 	for (i = 0; i < sib->sib_nmap; sim++, i++) {
1570 		ASSERT(sim->sim_domsid);
1571 		if (sim->sim_domsid == NULL)
1572 			return (1);
1573 
1574 		if ((sid = smb_sid_fromstr(sim->sim_domsid)) == NULL)
1575 			return (1);
1576 
1577 		sim->sim_sid = smb_sid_splice(sid, sim->sim_rid);
1578 		smb_sid_free(sid);
1579 	}
1580 
1581 	return (0);
1582 }
1583 
1584 /*
1585  * smb_idmap_batch_getmappings
1586  *
1587  * Trigger the ID mapping service to get the mappings for the
1588  * queued requests.
1589  *
1590  * Checks the result of all the queued requests.
1591  * If this is a Solaris -> Windows mapping it generates
1592  * binary SIDs from returned (domsid, rid) pairs.
1593  */
1594 idmap_stat
1595 smb_idmap_batch_getmappings(smb_idmap_batch_t *sib)
1596 {
1597 	idmap_stat idm_stat = IDMAP_SUCCESS;
1598 	int i;
1599 
1600 	idm_stat = kidmap_get_mappings(sib->sib_idmaph);
1601 	if (idm_stat != IDMAP_SUCCESS)
1602 		return (idm_stat);
1603 
1604 	/*
1605 	 * Check the status for all the queued requests
1606 	 */
1607 	for (i = 0; i < sib->sib_nmap; i++) {
1608 		if (sib->sib_maps[i].sim_stat != IDMAP_SUCCESS)
1609 			return (sib->sib_maps[i].sim_stat);
1610 	}
1611 
1612 	if (smb_idmap_batch_binsid(sib) != 0)
1613 		idm_stat = IDMAP_ERR_OTHER;
1614 
1615 	return (idm_stat);
1616 }
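
/*
 * Batch-mapping sketch (illustrative; uids[] and nmap are
 * hypothetical): queue uid->SID requests, resolve them in one upcall,
 * then tear the batch down.
 *
 *	smb_idmap_batch_t	sib;
 *	idmap_stat		stat;
 *	int			i;
 *
 *	stat = smb_idmap_batch_create(&sib, nmap, SMB_IDMAP_ID2SID);
 *	for (i = 0; i < nmap; i++)
 *		stat = smb_idmap_batch_getsid(sib.sib_idmaph,
 *		    &sib.sib_maps[i], uids[i], SMB_IDMAP_USER);
 *	stat = smb_idmap_batch_getmappings(&sib);
 *	... on success each sib.sib_maps[i].sim_sid is a binary SID ...
 *	smb_idmap_batch_destroy(&sib);
 */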
1617 
1618 uint64_t
1619 smb_time_unix_to_nt(timestruc_t *unix_time)
1620 {
1621 	uint64_t nt_time;
1622 
1623 	if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
1624 		return (0);
1625 
1626 	nt_time = unix_time->tv_sec;
1627 	nt_time *= 10000000;  /* seconds to 100ns */
1628 	nt_time += unix_time->tv_nsec / 100;
1629 	return (nt_time + NT_TIME_BIAS);
1630 }
1631 
1632 void
1633 smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
1634 {
1635 	uint32_t seconds;
1636 
1637 	ASSERT(unix_time);
1638 
1639 	if ((nt_time == 0) || (nt_time == -1)) {
1640 		unix_time->tv_sec = 0;
1641 		unix_time->tv_nsec = 0;
1642 		return;
1643 	}
1644 
1645 	nt_time -= NT_TIME_BIAS;
1646 	seconds = nt_time / 10000000;
1647 	unix_time->tv_sec = seconds;
1648 	unix_time->tv_nsec = (nt_time  % 10000000) * 100;
1649 }
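
/*
 * Worked example: NT time counts 100ns units since 1601-01-01, so
 * NT_TIME_BIAS is the 1601-to-1970 span expressed in those units.
 *
 *	unix {0, 0}			-> special-cased, NT time 0
 *	unix {1, 500} (1 s, 500 ns)	-> 1 * 10000000 + 5 + NT_TIME_BIAS
 */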
1650 
1651 /*
1652  * smb_time_gmt_to_local, smb_time_local_to_gmt
1653  *
1654  * Apply the gmt offset to convert between local time and gmt
1655  */
1656 int32_t
1657 smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt)
1658 {
1659 	if ((gmt == 0) || (gmt == -1))
1660 		return (0);
1661 
1662 	return (gmt - sr->sr_gmtoff);
1663 }
1664 
1665 int32_t
1666 smb_time_local_to_gmt(smb_request_t *sr, int32_t local)
1667 {
1668 	if ((local == 0) || (local == -1))
1669 		return (0);
1670 
1671 	return (local + sr->sr_gmtoff);
1672 }
1673 
1674 
1675 /*
1676  * smb_time_dos_to_unix
1677  *
1678  * Convert SMB_DATE & SMB_TIME values to a unix timestamp.
1679  *
1680  * A date/time field of 0 means that the server file system
1681  * assigned value need not be changed. The behaviour when the
1682  * date/time field is set to -1 is not documented but is
1683  * generally treated like 0.
1684  * If date or time is 0 or -1 the unix time is returned as 0
1685  * so that the caller can identify and handle this special case.
1686  */
1687 int32_t
1688 smb_time_dos_to_unix(int16_t date, int16_t time)
1689 {
1690 	struct tm	atm;
1691 
1692 	if (((date == 0) || (time == 0)) ||
1693 	    ((date == -1) || (time == -1))) {
1694 		return (0);
1695 	}
1696 
1697 	atm.tm_year = ((date >>  9) & 0x3F) + 80;
1698 	atm.tm_mon  = ((date >>  5) & 0x0F) - 1;
1699 	atm.tm_mday = ((date >>  0) & 0x1F);
1700 	atm.tm_hour = ((time >> 11) & 0x1F);
1701 	atm.tm_min  = ((time >>  5) & 0x3F);
1702 	atm.tm_sec  = ((time >>  0) & 0x1F) << 1;
1703 
1704 	return (smb_timegm(&atm));
1705 }
1706 
1707 void
1708 smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p)
1709 {
1710 	struct tm	atm;
1711 	int		i;
1712 	time_t		tmp_time;
1713 
1714 	if (ux_time == 0) {
1715 		*date_p = 0;
1716 		*time_p = 0;
1717 		return;
1718 	}
1719 
1720 	tmp_time = (time_t)ux_time;
1721 	(void) smb_gmtime_r(&tmp_time, &atm);
1722 
1723 	if (date_p) {
1724 		i = 0;
1725 		i += atm.tm_year - 80;
1726 		i <<= 4;
1727 		i += atm.tm_mon + 1;
1728 		i <<= 5;
1729 		i += atm.tm_mday;
1730 
1731 		*date_p = (short)i;
1732 	}
1733 	if (time_p) {
1734 		i = 0;
1735 		i += atm.tm_hour;
1736 		i <<= 6;
1737 		i += atm.tm_min;
1738 		i <<= 5;
1739 		i += atm.tm_sec >> 1;
1740 
1741 		*time_p = (short)i;
1742 	}
1743 }
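
/*
 * DOS date/time packing, for reference (this matches the shifts above):
 *
 *	date:	bits 9+    year - 1980
 *		bits 8-5   month (1-12)
 *		bits 4-0   day of month
 *	time:	bits 15-11 hour
 *		bits 10-5  minute
 *		bits 4-0   second / 2
 *
 * e.g. 1980-01-01 00:00:00 encodes as date 0x0021, time 0x0000.
 */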
1744 
1745 
1746 /*
1747  * smb_gmtime_r
1748  *
1749  * Thread-safe version of smb_gmtime. Returns a null pointer if either
1750  * input parameter is a null pointer. Otherwise returns a pointer
1751  * to result.
1752  *
1753  * Day of the week calculation: the Epoch was a thursday.
1754  *
1755  * There are no timezone corrections so tm_isdst and tm_gmtoff are
1756  * always zero, and the zone is always WET.
1757  */
1758 struct tm *
1759 smb_gmtime_r(time_t *clock, struct tm *result)
1760 {
1761 	time_t tsec;
1762 	int year;
1763 	int month;
1764 	int sec_per_month;
1765 
1766 	if (clock == 0 || result == 0)
1767 		return (0);
1768 
1769 	bzero(result, sizeof (struct tm));
1770 	tsec = *clock;
1771 	tsec -= tzh_leapcnt;
1772 
1773 	result->tm_wday = tsec / SECSPERDAY;
1774 	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;
1775 
1776 	year = EPOCH_YEAR;
1777 	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
1778 	    (SECSPERDAY * DAYSPERNYEAR))) {
1779 		if (isleap(year))
1780 			tsec -= SECSPERDAY * DAYSPERLYEAR;
1781 		else
1782 			tsec -= SECSPERDAY * DAYSPERNYEAR;
1783 
1784 		++year;
1785 	}
1786 
1787 	result->tm_year = year - TM_YEAR_BASE;
1788 	result->tm_yday = tsec / SECSPERDAY;
1789 
1790 	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
1791 		sec_per_month = days_in_month[month] * SECSPERDAY;
1792 
1793 		if (month == TM_FEBRUARY && isleap(year))
1794 			sec_per_month += SECSPERDAY;
1795 
1796 		if (tsec < sec_per_month)
1797 			break;
1798 
1799 		tsec -= sec_per_month;
1800 	}
1801 
1802 	result->tm_mon = month;
1803 	result->tm_mday = (tsec / SECSPERDAY) + 1;
1804 	tsec %= SECSPERDAY;
1805 	result->tm_sec = tsec % 60;
1806 	tsec /= 60;
1807 	result->tm_min = tsec % 60;
1808 	tsec /= 60;
1809 	result->tm_hour = (int)tsec;
1810 
1811 	return (result);
1812 }
1813 
1814 
1815 /*
1816  * smb_timegm
1817  *
1818  * Converts the broken-down time in tm to a time value, i.e. the number
1819  * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
1820  * not a POSIX or ANSI function. Per the man page, the input values of
1821  * tm_wday and tm_yday are ignored and, as the input data is assumed to
1822  * represent GMT, we force tm_isdst and tm_gmtoff to 0.
1823  *
1824  * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
1825  * and tm_yday, and bring the other fields within normal range. I don't
1826  * think this is really how it should be done but it's convenient for
1827  * now.
1828  */
1829 time_t
1830 smb_timegm(struct tm *tm)
1831 {
1832 	time_t tsec;
1833 	int dd;
1834 	int mm;
1835 	int yy;
1836 	int year;
1837 
1838 	if (tm == 0)
1839 		return (-1);
1840 
1841 	year = tm->tm_year + TM_YEAR_BASE;
1842 	tsec = tzh_leapcnt;
1843 
1844 	for (yy = EPOCH_YEAR; yy < year; ++yy) {
1845 		if (isleap(yy))
1846 			tsec += SECSPERDAY * DAYSPERLYEAR;
1847 		else
1848 			tsec += SECSPERDAY * DAYSPERNYEAR;
1849 	}
1850 
1851 	for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
1852 		dd = days_in_month[mm] * SECSPERDAY;
1853 
1854 		if (mm == TM_FEBRUARY && isleap(year))
1855 			dd += SECSPERDAY;
1856 
1857 		tsec += dd;
1858 	}
1859 
1860 	tsec += (tm->tm_mday - 1) * SECSPERDAY;
1861 	tsec += tm->tm_sec;
1862 	tsec += tm->tm_min * SECSPERMIN;
1863 	tsec += tm->tm_hour * SECSPERHOUR;
1864 
1865 	tm->tm_isdst = 0;
1866 	(void) smb_gmtime_r(&tsec, tm);
1867 	return (tsec);
1868 }
1869 
1870 /*
1871  * smb_pad_align
1872  *
1873  * Returns the number of bytes required to pad an offset to the
1874  * specified alignment.
1875  */
1876 uint32_t
1877 smb_pad_align(uint32_t offset, uint32_t align)
1878 {
1879 	uint32_t pad = offset % align;
1880 
1881 	if (pad != 0)
1882 		pad = align - pad;
1883 
1884 	return (pad);
1885 }
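
/*
 * e.g. smb_pad_align(3, 4) == 1 and smb_pad_align(8, 4) == 0, so
 * offset + smb_pad_align(offset, align) rounds offset up to the next
 * multiple of align.
 */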
1886 
1887 /*
1888  * smb_panic
1889  *
1890  * Logs the file name, function name and line number passed in and panics the
1891  * system.
1892  */
1893 void
1894 smb_panic(char *file, const char *func, int line)
1895 {
1896 	cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
1897 }
1898 
1899 /*
1900  * Creates an AVL tree and initializes the given smb_avl_t
1901  * structure using the passed args
1902  */
1903 void
1904 smb_avl_create(smb_avl_t *avl, size_t size, size_t offset, smb_avl_nops_t *ops)
1905 {
1906 	ASSERT(avl);
1907 	ASSERT(ops);
1908 
1909 	rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL);
1910 	mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL);
1911 
1912 	avl->avl_nops = ops;
1913 	avl->avl_state = SMB_AVL_STATE_READY;
1914 	avl->avl_refcnt = 0;
1915 	(void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence,
1916 	    sizeof (uint32_t));
1917 
1918 	avl_create(&avl->avl_tree, ops->avln_cmp, size, offset);
1919 }
1920 
1921 /*
1922  * Destroys the specified AVL tree.
1923  * It waits for all the in-flight operations to finish
1924  * before destroying the AVL.
1925  */
1926 void
1927 smb_avl_destroy(smb_avl_t *avl)
1928 {
1929 	void *cookie = NULL;
1930 	void *node;
1931 
1932 	ASSERT(avl);
1933 
1934 	mutex_enter(&avl->avl_mutex);
1935 	if (avl->avl_state != SMB_AVL_STATE_READY) {
1936 		mutex_exit(&avl->avl_mutex);
1937 		return;
1938 	}
1939 
1940 	avl->avl_state = SMB_AVL_STATE_DESTROYING;
1941 
1942 	while (avl->avl_refcnt > 0)
1943 		(void) cv_wait(&avl->avl_cv, &avl->avl_mutex);
1944 	mutex_exit(&avl->avl_mutex);
1945 
1946 	rw_enter(&avl->avl_lock, RW_WRITER);
1947 	while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL)
1948 		avl->avl_nops->avln_destroy(node);
1949 
1950 	avl_destroy(&avl->avl_tree);
1951 	rw_exit(&avl->avl_lock);
1952 
1953 	rw_destroy(&avl->avl_lock);
1954 
1955 	mutex_destroy(&avl->avl_mutex);
1956 	bzero(avl, sizeof (smb_avl_t));
1957 }
1958 
1959 /*
1960  * Adds the given item to the AVL if it's
1961  * not already there.
1962  *
1963  * Returns:
1964  *
1965  * 	ENOTACTIVE	AVL is not in READY state
1966  * 	EEXIST		The item is already in AVL
1967  */
1968 int
1969 smb_avl_add(smb_avl_t *avl, void *item)
1970 {
1971 	avl_index_t where;
1972 
1973 	ASSERT(avl);
1974 	ASSERT(item);
1975 
1976 	if (!smb_avl_hold(avl))
1977 		return (ENOTACTIVE);
1978 
1979 	rw_enter(&avl->avl_lock, RW_WRITER);
1980 	if (avl_find(&avl->avl_tree, item, &where) != NULL) {
1981 		rw_exit(&avl->avl_lock);
1982 		smb_avl_rele(avl);
1983 		return (EEXIST);
1984 	}
1985 
1986 	avl_insert(&avl->avl_tree, item, where);
1987 	avl->avl_sequence++;
1988 	rw_exit(&avl->avl_lock);
1989 
1990 	smb_avl_rele(avl);
1991 	return (0);
1992 }
1993 
1994 /*
1995  * Removes the given item from the AVL.
1996  * If no reference is left on the item
1997  * it will also be destroyed by calling the
1998  * registered destroy operation.
1999  */
2000 void
2001 smb_avl_remove(smb_avl_t *avl, void *item)
2002 {
2003 	avl_index_t where;
2004 	void *rm_item;
2005 
2006 	ASSERT(avl);
2007 	ASSERT(item);
2008 
2009 	if (!smb_avl_hold(avl))
2010 		return;
2011 
2012 	rw_enter(&avl->avl_lock, RW_WRITER);
2013 	if ((rm_item = avl_find(&avl->avl_tree, item, &where)) == NULL) {
2014 		rw_exit(&avl->avl_lock);
2015 		smb_avl_rele(avl);
2016 		return;
2017 	}
2018 
2019 	avl_remove(&avl->avl_tree, rm_item);
2020 	if (avl->avl_nops->avln_rele(rm_item))
2021 		avl->avl_nops->avln_destroy(rm_item);
2022 	avl->avl_sequence++;
2023 	rw_exit(&avl->avl_lock);
2024 
2025 	smb_avl_rele(avl);
2026 }
2027 
2028 /*
2029  * Looks up the AVL for the given item.
2030  * If the item is found a hold on the object
2031  * is taken before the pointer to it is
2032  * returned to the caller. The caller MUST
2033  * always call smb_avl_release() after it's done
2034  * using the returned object to release the hold
2035  * taken on the object.
2036  */
2037 void *
2038 smb_avl_lookup(smb_avl_t *avl, void *item)
2039 {
2040 	void *node = NULL;
2041 
2042 	ASSERT(avl);
2043 	ASSERT(item);
2044 
2045 	if (!smb_avl_hold(avl))
2046 		return (NULL);
2047 
2048 	rw_enter(&avl->avl_lock, RW_READER);
2049 	node = avl_find(&avl->avl_tree, item, NULL);
2050 	if (node != NULL)
2051 		avl->avl_nops->avln_hold(node);
2052 	rw_exit(&avl->avl_lock);
2053 
2054 	if (node == NULL)
2055 		smb_avl_rele(avl);
2056 
2057 	return (node);
2058 }
2059 
2060 /*
2061  * The hold on the given object is released.
2062  * This function MUST always be called after
2063  * smb_avl_lookup() and smb_avl_iterate() for
2064  * the returned object.
2065  *
2066  * If AVL is in DESTROYING state, the destroying
2067  * thread will be notified.
2068  */
2069 void
2070 smb_avl_release(smb_avl_t *avl, void *item)
2071 {
2072 	ASSERT(avl);
2073 	ASSERT(item);
2074 
2075 	if (avl->avl_nops->avln_rele(item))
2076 		avl->avl_nops->avln_destroy(item);
2077 
2078 	smb_avl_rele(avl);
2079 }
2080 
2081 /*
2082  * Initializes the given cursor for the AVL.
2083  * The cursor will be used to iterate through the AVL
2084  */
2085 void
2086 smb_avl_iterinit(smb_avl_t *avl, smb_avl_cursor_t *cursor)
2087 {
2088 	ASSERT(avl);
2089 	ASSERT(cursor);
2090 
2091 	cursor->avlc_next = NULL;
2092 	cursor->avlc_sequence = avl->avl_sequence;
2093 }
2094 
2095 /*
2096  * Iterates through the AVL using the given cursor.
2097  * It always starts at the beginning and then returns
2098  * a pointer to the next object on each subsequent call.
2099  *
2100  * If a new object is added to or removed from the AVL
2101  * between two calls to this function, the iteration
2102  * will terminate prematurely.
2103  *
2104  * The caller MUST always call smb_avl_release() after it's
2105  * done using the returned object to release the hold taken
2106  * on the object.
2107  */
2108 void *
2109 smb_avl_iterate(smb_avl_t *avl, smb_avl_cursor_t *cursor)
2110 {
2111 	void *node;
2112 
2113 	ASSERT(avl);
2114 	ASSERT(cursor);
2115 
2116 	if (!smb_avl_hold(avl))
2117 		return (NULL);
2118 
2119 	rw_enter(&avl->avl_lock, RW_READER);
2120 	if (cursor->avlc_sequence != avl->avl_sequence) {
2121 		rw_exit(&avl->avl_lock);
2122 		smb_avl_rele(avl);
2123 		return (NULL);
2124 	}
2125 
2126 	if (cursor->avlc_next == NULL)
2127 		node = avl_first(&avl->avl_tree);
2128 	else
2129 		node = AVL_NEXT(&avl->avl_tree, cursor->avlc_next);
2130 
2131 	if (node != NULL)
2132 		avl->avl_nops->avln_hold(node);
2133 
2134 	cursor->avlc_next = node;
2135 	rw_exit(&avl->avl_lock);
2136 
2137 	if (node == NULL)
2138 		smb_avl_rele(avl);
2139 
2140 	return (node);
2141 }
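
/*
 * Iteration sketch (illustrative): every object handed out by
 * smb_avl_iterate() carries a hold that the caller must release.
 *
 *	smb_avl_cursor_t	cursor;
 *	void			*node;
 *
 *	smb_avl_iterinit(avl, &cursor);
 *	while ((node = smb_avl_iterate(avl, &cursor)) != NULL) {
 *		... use node ...
 *		smb_avl_release(avl, node);
 *	}
 */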
2142 
2143 /*
2144  * Increments the AVL reference count in order to
2145  * prevent the avl from being destroyed while it's
2146  * being accessed.
2147  */
2148 static boolean_t
2149 smb_avl_hold(smb_avl_t *avl)
2150 {
2151 	mutex_enter(&avl->avl_mutex);
2152 	if (avl->avl_state != SMB_AVL_STATE_READY) {
2153 		mutex_exit(&avl->avl_mutex);
2154 		return (B_FALSE);
2155 	}
2156 	avl->avl_refcnt++;
2157 	mutex_exit(&avl->avl_mutex);
2158 
2159 	return (B_TRUE);
2160 }
2161 
2162 /*
2163  * Decrements the AVL reference count to release the
2164  * hold. If another thread is trying to destroy the
2165  * AVL and is waiting for the reference count to become
2166  * 0, it is signaled to wake up.
2167  */
2168 static void
2169 smb_avl_rele(smb_avl_t *avl)
2170 {
2171 	mutex_enter(&avl->avl_mutex);
2172 	ASSERT(avl->avl_refcnt > 0);
2173 	avl->avl_refcnt--;
2174 	if (avl->avl_state == SMB_AVL_STATE_DESTROYING)
2175 		cv_broadcast(&avl->avl_cv);
2176 	mutex_exit(&avl->avl_mutex);
2177 }
2178 
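/*
 * Editor's sketch (illustrative only, not this file's actual destroy
 * routine): how a destroying thread would drain the reference count
 * that smb_avl_hold()/smb_avl_rele() maintain.  The cv_broadcast()
 * above is what wakes this wait.
 */
#ifdef	SMB_AVL_EXAMPLES
static void
smb_avl_drain_example(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	avl->avl_state = SMB_AVL_STATE_DESTROYING;
	while (avl->avl_refcnt > 0)
		cv_wait(&avl->avl_cv, &avl->avl_mutex);
	mutex_exit(&avl->avl_mutex);
}
#endif	/* SMB_AVL_EXAMPLES */
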
2179 /*
2180  * smb_latency_init
2181  */
2182 void
2183 smb_latency_init(smb_latency_t *lat)
2184 {
2185 	bzero(lat, sizeof (*lat));
2186 	mutex_init(&lat->ly_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
2187 }
2188 
2189 /*
2190  * smb_latency_destroy
2191  */
2192 void
2193 smb_latency_destroy(smb_latency_t *lat)
2194 {
2195 	mutex_destroy(&lat->ly_mutex);
2196 }
2197 
2198 /*
2199  * smb_latency_add_sample
2200  *
2201  * Uses the new sample to calculate the new mean and standard deviation. The
2202  * sample must be a scaled value.
2203  */
2204 void
2205 smb_latency_add_sample(smb_latency_t *lat, hrtime_t sample)
2206 {
2207 	hrtime_t	a_mean;
2208 	hrtime_t	d_mean;
2209 
2210 	mutex_enter(&lat->ly_mutex);
2211 	lat->ly_a_nreq++;
2212 	lat->ly_a_sum += sample;
2213 	if (lat->ly_a_nreq != 0) {
2214 		a_mean = lat->ly_a_sum / lat->ly_a_nreq;
2215 		lat->ly_a_stddev =
2216 		    (sample - a_mean) * (sample - lat->ly_a_mean);
2217 		lat->ly_a_mean = a_mean;
2218 	}
2219 	lat->ly_d_nreq++;
2220 	lat->ly_d_sum += sample;
2221 	if (lat->ly_d_nreq != 0) {
2222 		d_mean = lat->ly_d_sum / lat->ly_d_nreq;
2223 		lat->ly_d_stddev =
2224 		    (sample - d_mean) * (sample - lat->ly_d_mean);
2225 		lat->ly_d_mean = d_mean;
2226 	}
2227 	mutex_exit(&lat->ly_mutex);
2228 }
2229 
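/*
 * Editor's note: the update above performs one step of Welford's
 * online recurrence; ly_a_stddev/ly_d_stddev hold the latest
 * (sample - old_mean) * (sample - new_mean) increment rather than an
 * accumulated second moment.  For comparison, a sketch of the
 * accumulating form (hypothetical storage, not smb_latency_t fields):
 */
#ifdef	SMB_LATENCY_EXAMPLES
static void
smb_latency_welford_example(hrtime_t sample, hrtime_t *mean,
    hrtime_t *m2, uint64_t *nreq)
{
	hrtime_t	old_mean = *mean;

	(*nreq)++;
	*mean = old_mean + (sample - old_mean) / (hrtime_t)*nreq;
	*m2 += (sample - old_mean) * (sample - *mean);
	/* population variance is *m2 / *nreq; stddev is its square root */
}
#endif	/* SMB_LATENCY_EXAMPLES */
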
2230 /*
2231  * smb_srqueue_init
2232  */
2233 void
2234 smb_srqueue_init(smb_srqueue_t *srq)
2235 {
2236 	bzero(srq, sizeof (*srq));
2237 	mutex_init(&srq->srq_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
2238 	srq->srq_wlastupdate = srq->srq_rlastupdate = gethrtime_unscaled();
2239 }
2240 
2241 /*
2242  * smb_srqueue_destroy
2243  */
2244 void
2245 smb_srqueue_destroy(smb_srqueue_t *srq)
2246 {
2247 	mutex_destroy(&srq->srq_mutex);
2248 }
2249 
2250 /*
2251  * smb_srqueue_waitq_enter
2252  */
2253 void
2254 smb_srqueue_waitq_enter(smb_srqueue_t *srq)
2255 {
2256 	hrtime_t	new;
2257 	hrtime_t	delta;
2258 	uint32_t	wcnt;
2259 
2260 	mutex_enter(&srq->srq_mutex);
2261 	new = gethrtime_unscaled();
2262 	delta = new - srq->srq_wlastupdate;
2263 	srq->srq_wlastupdate = new;
2264 	wcnt = srq->srq_wcnt++;
2265 	if (wcnt != 0) {
2266 		srq->srq_wlentime += delta * wcnt;
2267 		srq->srq_wtime += delta;
2268 	}
2269 	mutex_exit(&srq->srq_mutex);
2270 }
2271 
2272 /*
2273  * smb_srqueue_runq_exit
2274  */
2275 void
2276 smb_srqueue_runq_exit(smb_srqueue_t *srq)
2277 {
2278 	hrtime_t	new;
2279 	hrtime_t	delta;
2280 	uint32_t	rcnt;
2281 
2282 	mutex_enter(&srq->srq_mutex);
2283 	new = gethrtime_unscaled();
2284 	delta = new - srq->srq_rlastupdate;
2285 	srq->srq_rlastupdate = new;
2286 	rcnt = srq->srq_rcnt--;
2287 	ASSERT(rcnt > 0);
2288 	srq->srq_rlentime += delta * rcnt;
2289 	srq->srq_rtime += delta;
2290 	mutex_exit(&srq->srq_mutex);
2291 }
2292 
2293 /*
2294  * smb_srqueue_waitq_to_runq
2295  */
2296 void
2297 smb_srqueue_waitq_to_runq(smb_srqueue_t *srq)
2298 {
2299 	hrtime_t	new;
2300 	hrtime_t	delta;
2301 	uint32_t	wcnt;
2302 	uint32_t	rcnt;
2303 
2304 	mutex_enter(&srq->srq_mutex);
2305 	new = gethrtime_unscaled();
2306 	delta = new - srq->srq_wlastupdate;
2307 	srq->srq_wlastupdate = new;
2308 	wcnt = srq->srq_wcnt--;
2309 	ASSERT(wcnt > 0);
2310 	srq->srq_wlentime += delta * wcnt;
2311 	srq->srq_wtime += delta;
2312 	delta = new - srq->srq_rlastupdate;
2313 	srq->srq_rlastupdate = new;
2314 	rcnt = srq->srq_rcnt++;
2315 	if (rcnt != 0) {
2316 		srq->srq_rlentime += delta * rcnt;
2317 		srq->srq_rtime += delta;
2318 	}
2319 	mutex_exit(&srq->srq_mutex);
2320 }
2321 
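/*
 * Editor's sketch (illustrative only): how the three transitions above
 * bracket the life of a request, mirroring the kernel's kstat
 * wait/run queue accounting.  The processing step is hypothetical.
 */
#ifdef	SMB_SRQUEUE_EXAMPLES
static void
smb_srqueue_lifecycle_example(smb_srqueue_t *srq)
{
	smb_srqueue_waitq_enter(srq);	/* request arrives and is queued */
	smb_srqueue_waitq_to_runq(srq);	/* a worker starts processing it */
	/* ... process the request ... */
	smb_srqueue_runq_exit(srq);	/* request completes */
}
#endif	/* SMB_SRQUEUE_EXAMPLES */
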
2322 /*
2323  * smb_srqueue_update
2324  *
2325  * Takes a snapshot of the smb_sr_stat_t structure passed in.
2326  */
2327 void
2328 smb_srqueue_update(smb_srqueue_t *srq, smb_kstat_utilization_t *kd)
2329 {
2330 	hrtime_t	delta;
2331 	hrtime_t	snaptime;
2332 
2333 	mutex_enter(&srq->srq_mutex);
2334 	snaptime = gethrtime_unscaled();
2335 	delta = snaptime - srq->srq_wlastupdate;
2336 	srq->srq_wlastupdate = snaptime;
2337 	if (srq->srq_wcnt != 0) {
2338 		srq->srq_wlentime += delta * srq->srq_wcnt;
2339 		srq->srq_wtime += delta;
2340 	}
2341 	delta = snaptime - srq->srq_rlastupdate;
2342 	srq->srq_rlastupdate = snaptime;
2343 	if (srq->srq_rcnt != 0) {
2344 		srq->srq_rlentime += delta * srq->srq_rcnt;
2345 		srq->srq_rtime += delta;
2346 	}
2347 	kd->ku_rlentime = srq->srq_rlentime;
2348 	kd->ku_rtime = srq->srq_rtime;
2349 	kd->ku_wlentime = srq->srq_wlentime;
2350 	kd->ku_wtime = srq->srq_wtime;
2351 	mutex_exit(&srq->srq_mutex);
2352 	scalehrtime(&kd->ku_rlentime);
2353 	scalehrtime(&kd->ku_rtime);
2354 	scalehrtime(&kd->ku_wlentime);
2355 	scalehrtime(&kd->ku_wtime);
2356 }
2357 
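/*
 * Editor's note: once scaled, the snapshot fields follow the usual
 * kstat utilization conventions, so a consumer can derive iostat-style
 * rates from two snapshots taken dt nanoseconds apart: average run
 * queue length is delta(ku_rlentime) / dt and percent busy is
 * 100 * delta(ku_rtime) / dt.  A sketch (hypothetical helper):
 */
#ifdef	SMB_SRQUEUE_EXAMPLES
static void
smb_srqueue_rates_example(const smb_kstat_utilization_t *prev,
    const smb_kstat_utilization_t *cur, hrtime_t dt,
    uint64_t *rqlen_x100, uint64_t *pct_busy)
{
	*rqlen_x100 = (uint64_t)(cur->ku_rlentime - prev->ku_rlentime) *
	    100 / (uint64_t)dt;
	*pct_busy = (uint64_t)(cur->ku_rtime - prev->ku_rtime) *
	    100 / (uint64_t)dt;
}
#endif	/* SMB_SRQUEUE_EXAMPLES */
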
2358 void
2359 smb_threshold_init(smb_cmd_threshold_t *ct, char *cmd, int threshold,
2360     int timeout)
2361 {
2362 	bzero(ct, sizeof (smb_cmd_threshold_t));
2363 	mutex_init(&ct->ct_mutex, NULL, MUTEX_DEFAULT, NULL);
2364 	ct->ct_cmd = cmd;
2365 	ct->ct_threshold = threshold;
2366 	ct->ct_event = smb_event_create(timeout);
2367 	ct->ct_event_id = smb_event_txid(ct->ct_event);
2368 
2369 	if (smb_threshold_debug) {
2370 		cmn_err(CE_NOTE, "smb_threshold_init[%s]: threshold (%d), "
2371 		    "timeout (%d)", cmd, threshold, timeout);
2372 	}
2373 }
2374 
2375 /*
2376  * This function must be called prior to SMB_SERVER_STATE_STOPPING state
2377  * so that ct_event can be successfully removed from the event list.
2378  * It should not be called when the server mutex is held or when the
2379  * server is removed from the server list.
2380  */
2381 void
2382 smb_threshold_fini(smb_cmd_threshold_t *ct)
2383 {
2384 	smb_event_destroy(ct->ct_event);
2385 	mutex_destroy(&ct->ct_mutex);
2386 	bzero(ct, sizeof (smb_cmd_threshold_t));
2387 }
2388 
2389 /*
2390  * This threshold mechanism can be used to limit the number of simultaneous
2391  * requests, which serves to limit the stress that can be applied to the
2392  * service and also allows the service to respond to requests before the
2393  * client times out and reports that the server is not responding.
2394  *
2395  * If the number of requests exceeds the threshold, new requests will be
2396  * stalled until the number drops back below the threshold.  Stalled
2397  * requests are notified as capacity frees up, in which case 0 is returned.
2398  * If the timeout expires before the request is notified, a non-zero errno
2399  * value will be returned.
2400  *
2401  * To avoid a flood of messages, the message rate is throttled as well.
2402  */
2403 int
2404 smb_threshold_enter(smb_cmd_threshold_t *ct)
2405 {
2406 	int	rc;
2407 
2408 	mutex_enter(&ct->ct_mutex);
2409 	if (ct->ct_active_cnt >= ct->ct_threshold && ct->ct_event != NULL) {
2410 		atomic_inc_32(&ct->ct_blocked_cnt);
2411 
2412 		if (smb_threshold_debug) {
2413 			cmn_err(CE_NOTE, "smb_threshold_enter[%s]: blocked "
2414 			    "(blocked ops: %u, inflight ops: %u)",
2415 			    ct->ct_cmd, ct->ct_blocked_cnt, ct->ct_active_cnt);
2416 		}
2417 
2418 		mutex_exit(&ct->ct_mutex);
2419 
2420 		if ((rc = smb_event_wait(ct->ct_event)) != 0) {
2421 			if (rc == ECANCELED)
2422 				return (rc);
2423 
2424 			mutex_enter(&ct->ct_mutex);
2425 			if (ct->ct_active_cnt >= ct->ct_threshold) {
2426 
2427 				if ((ct->ct_error_cnt %
2428 				    SMB_THRESHOLD_REPORT_THROTTLE) == 0) {
2429 					cmn_err(CE_NOTE, "%s: server busy: "
2430 				    "threshold (%d) exceeded",
2431 					    ct->ct_cmd, ct->ct_threshold);
2432 				}
2433 
2434 				atomic_inc_32(&ct->ct_error_cnt);
2435 				mutex_exit(&ct->ct_mutex);
2436 				return (rc);
2437 			}
2438 
2439 			mutex_exit(&ct->ct_mutex);
2440 
2441 		}
2442 
2443 		mutex_enter(&ct->ct_mutex);
2444 		atomic_dec_32(&ct->ct_blocked_cnt);
2445 		if (smb_threshold_debug) {
2446 			cmn_err(CE_NOTE, "smb_threshold_enter[%s]: resumed "
2447 			    "(blocked ops: %u, inflight ops: %u)", ct->ct_cmd,
2448 			    ct->ct_blocked_cnt, ct->ct_active_cnt);
2449 		}
2450 	}
2451 
2452 	atomic_inc_32(&ct->ct_active_cnt);
2453 	mutex_exit(&ct->ct_mutex);
2454 	return (0);
2455 }
2456 
2457 void
2458 smb_threshold_exit(smb_cmd_threshold_t *ct, smb_server_t *sv)
2459 {
2460 	mutex_enter(&ct->ct_mutex);
2461 	atomic_dec_32(&ct->ct_active_cnt);
2462 	mutex_exit(&ct->ct_mutex);
2463 	smb_event_notify(sv, ct->ct_event_id);
2464 }
2465
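/*
 * Editor's sketch (illustrative only): a command handler bracketed by
 * the threshold mechanism.  Everything other than the
 * smb_threshold_enter()/smb_threshold_exit() calls is hypothetical;
 * the ct itself would be set up with smb_threshold_init() and torn
 * down with smb_threshold_fini() as described above.
 */
#ifdef	SMB_THRESHOLD_EXAMPLES
static int
smb_threshold_usage_example(smb_cmd_threshold_t *ct, smb_server_t *sv)
{
	int	rc;

	if ((rc = smb_threshold_enter(ct)) != 0)
		return (rc);	/* stalled past timeout, or canceled */

	/* ... process the command ... */

	smb_threshold_exit(ct, sv);	/* notify any stalled requests */
	return (0);
}
#endif	/* SMB_THRESHOLD_EXAMPLES */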