1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 #include <sys/param.h>
26 #include <sys/types.h>
27 #include <sys/tzfile.h>
28 #include <sys/atomic.h>
29 #include <sys/kidmap.h>
30 #include <sys/time.h>
31 #include <sys/spl.h>
32 #include <sys/cpuvar.h>
33 #include <sys/random.h>
34 #include <smbsrv/smb_kproto.h>
35 #include <smbsrv/smb_fsops.h>
36 #include <smbsrv/smbinfo.h>
37 #include <smbsrv/smb_xdr.h>
38 #include <smbsrv/smb_vops.h>
39 #include <smbsrv/smb_idmap.h>
40 
41 #include <sys/sid.h>
42 #include <sys/priv_names.h>
43 
44 static kmem_cache_t	*smb_dtor_cache;
45 static boolean_t	smb_llist_initialized = B_FALSE;
46 
47 static boolean_t smb_thread_continue_timedwait_locked(smb_thread_t *, int);
48 
49 static boolean_t smb_avl_hold(smb_avl_t *);
50 static void smb_avl_rele(smb_avl_t *);
51 
52 time_t tzh_leapcnt = 0;
53 
54 struct tm
55 *smb_gmtime_r(time_t *clock, struct tm *result);
56 
57 time_t
58 smb_timegm(struct tm *tm);
59 
60 struct	tm {
61 	int	tm_sec;
62 	int	tm_min;
63 	int	tm_hour;
64 	int	tm_mday;
65 	int	tm_mon;
66 	int	tm_year;
67 	int	tm_wday;
68 	int	tm_yday;
69 	int	tm_isdst;
70 };
71 
72 static int days_in_month[] = {
73 	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
74 };
75 
76 int
77 smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
78 {
79 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
80 		return (smb_wcequiv_strlen(str));
81 	return (strlen(str));
82 }
83 
84 int
85 smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
86 {
87 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
88 		return (smb_wcequiv_strlen(str) + 2);
89 	return (strlen(str) + 1);
90 }
91 
92 int
93 smb_ascii_or_unicode_null_len(struct smb_request *sr)
94 {
95 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
96 		return (2);
97 	return (1);
98 }
99 
100 /*
101  * Return B_TRUE if pattern contains wildcards
102  */
103 boolean_t
104 smb_contains_wildcards(const char *pattern)
105 {
106 	static const char *wildcards = "*?";
107 
108 	return (strpbrk(pattern, wildcards) != NULL);
109 }
110 
111 /*
112  * When converting wildcards a '.' in a name is treated as a base and
113  * extension separator even if the name is longer than 8.3.
114  *
115  * The '*' character matches an entire part of the name.  For example,
116  * "*.abc" matches any name with an extension of "abc".
117  *
118  * The '?' character matches a single character.
119  * If the base contains all ? (8 or more) then it is treated as *.
120  * If the extension contains all ? (3 or more) then it is treated as *.
121  *
122  * Clients convert ASCII wildcards to Unicode wildcards as follows:
123  *
124  *	? is converted to >
125  *	. is converted to " if it is followed by ? or *
126  *	* is converted to < if it is followed by .
127  *
128  * Note that clients convert "*." to '<' and drop the '.' but "*.txt"
129  * is sent as "<.TXT", i.e.
130  *
131  * 	dir *.		->	dir <
132  * 	dir *.txt	->	dir <.TXT
133  *
134  * Since " and < are illegal in Windows file names, we always convert
135  * these Unicode wildcards without checking the following character.
136  */
137 void
138 smb_convert_wildcards(char *pattern)
139 {
140 	static char *match_all[] = {
141 		"*.",
142 		"*.*"
143 	};
144 	char	*extension;
145 	char	*p;
146 	int	len;
147 	int	i;
148 
149 	/*
150 	 * Special case "<" for "dir *.", and fast-track for "*".
151 	 */
152 	if ((*pattern == '<') || (*pattern == '*')) {
153 		if (*(pattern + 1) == '\0') {
154 			*pattern = '*';
155 			return;
156 		}
157 	}
158 
159 	for (p = pattern; *p != '\0'; ++p) {
160 		switch (*p) {
161 		case '<':
162 			*p = '*';
163 			break;
164 		case '>':
165 			*p = '?';
166 			break;
167 		case '\"':
168 			*p = '.';
169 			break;
170 		default:
171 			break;
172 		}
173 	}
174 
175 	/*
176 	 * Replace "????????.ext" with "*.ext".
177 	 */
178 	p = pattern;
179 	p += strspn(p, "?");
180 	if (*p == '.') {
181 		*p = '\0';
182 		len = strlen(pattern);
183 		*p = '.';
184 		if (len >= SMB_NAME83_BASELEN) {
185 			*pattern = '*';
186 			(void) strlcpy(pattern + 1, p, MAXPATHLEN - 1);
187 		}
188 	}
189 
190 	/*
191 	 * Replace "base.???" with 'base.*'.
192 	 */
193 	if ((extension = strrchr(pattern, '.')) != NULL) {
194 		p = ++extension;
195 		p += strspn(p, "?");
196 		if (*p == '\0') {
197 			len = strlen(extension);
198 			if (len >= SMB_NAME83_EXTLEN) {
199 				*extension = '\0';
200 				(void) strlcat(pattern, "*", MAXPATHLEN);
201 			}
202 		}
203 	}
204 
205 	/*
206 	 * Replace anything that matches an entry in match_all with "*".
207 	 */
208 	for (i = 0; i < sizeof (match_all) / sizeof (match_all[0]); ++i) {
209 		if (strcmp(pattern, match_all[i]) == 0) {
210 			(void) strlcpy(pattern, "*", MAXPATHLEN);
211 			break;
212 		}
213 	}
214 }
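/*
 * Illustrative sketch (not part of the original source): how a caller
 * might apply smb_convert_wildcards() to a client-supplied pattern.
 * The buffer contents are hypothetical; the function edits the string
 * in place, so the buffer should be MAXPATHLEN bytes.
 *
 *	char pattern[MAXPATHLEN];
 *
 *	(void) strlcpy(pattern, "<.TXT", sizeof (pattern));
 *	smb_convert_wildcards(pattern);
 *
 * after which pattern contains "*.TXT": the '<' was rewritten to '*',
 * so the pattern matches any name with a "TXT" extension.
 */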
215 
216 /*
217  * smb_sattr_check
218  *
219  * Check file attributes against a search attribute (sattr) mask.
220  *
221  * Normal files, which includes READONLY and ARCHIVE, always pass
222  * this check.  If the DIRECTORY, HIDDEN or SYSTEM special attributes
223  * are set then they must appear in the search mask.  The special
224  * attributes are inclusive, i.e. all special attributes that appear
225  * in sattr must also appear in the file attributes for the check to
226  * pass.
227  *
228  * The following examples show how this works:
229  *
230  *		fileA:	READONLY
231  *		fileB:	0 (no attributes = normal file)
232  *		fileC:	READONLY, ARCHIVE
233  *		fileD:	HIDDEN
234  *		fileE:	READONLY, HIDDEN, SYSTEM
235  *		dirA:	DIRECTORY
236  *
237  * search attribute: 0
238  *		Returns: fileA, fileB and fileC.
239  * search attribute: HIDDEN
240  *		Returns: fileA, fileB, fileC and fileD.
241  * search attribute: SYSTEM
242  *		Returns: fileA, fileB and fileC.
243  * search attribute: DIRECTORY
244  *		Returns: fileA, fileB, fileC and dirA.
245  * search attribute: HIDDEN and SYSTEM
246  *		Returns: fileA, fileB, fileC, fileD and fileE.
247  *
248  * Returns true if the file and sattr match; otherwise, returns false.
249  */
250 boolean_t
251 smb_sattr_check(uint16_t dosattr, uint16_t sattr)
252 {
253 	if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
254 	    !(sattr & FILE_ATTRIBUTE_DIRECTORY))
255 		return (B_FALSE);
256 
257 	if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
258 	    !(sattr & FILE_ATTRIBUTE_HIDDEN))
259 		return (B_FALSE);
260 
261 	if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
262 	    !(sattr & FILE_ATTRIBUTE_SYSTEM))
263 		return (B_FALSE);
264 
265 	return (B_TRUE);
266 }
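/*
 * Illustrative sketch (not part of the original source): smb_sattr_check()
 * applied to the fileD/fileE examples above.  The attribute values are
 * hypothetical.
 *
 *	uint16_t fileD = FILE_ATTRIBUTE_HIDDEN;
 *	uint16_t fileE = FILE_ATTRIBUTE_READONLY | FILE_ATTRIBUTE_HIDDEN |
 *	    FILE_ATTRIBUTE_SYSTEM;
 *
 *	smb_sattr_check(fileD, 0)			returns B_FALSE
 *	smb_sattr_check(fileD, FILE_ATTRIBUTE_HIDDEN)	returns B_TRUE
 *	smb_sattr_check(fileE, FILE_ATTRIBUTE_HIDDEN)	returns B_FALSE
 *	    (SYSTEM is set on the file but absent from the search mask)
 */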
267 
268 int
269 microtime(timestruc_t *tvp)
270 {
271 	tvp->tv_sec = gethrestime_sec();
272 	tvp->tv_nsec = 0;
273 	return (0);
274 }
275 
276 int32_t
277 clock_get_milli_uptime(void)
278 {
279 	return (TICK_TO_MSEC(ddi_get_lbolt()));
280 }
281 
282 int /*ARGSUSED*/
283 smb_noop(void *p, size_t size, int foo)
284 {
285 	return (0);
286 }
287 
288 /*
289  * smb_idpool_increment
290  *
291  * This function increments the ID pool by doubling the current size. This
292  * function assumes the caller entered the mutex of the pool.
293  */
294 static int
295 smb_idpool_increment(
296     smb_idpool_t	*pool)
297 {
298 	uint8_t		*new_pool;
299 	uint32_t	new_size;
300 
301 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
302 
303 	new_size = pool->id_size * 2;
304 	if (new_size <= SMB_IDPOOL_MAX_SIZE) {
305 		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
306 		if (new_pool) {
307 			bzero(new_pool, new_size / 8);
308 			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
309 			kmem_free(pool->id_pool, pool->id_size / 8);
310 			pool->id_pool = new_pool;
311 			pool->id_free_counter += new_size - pool->id_size;
312 			pool->id_max_free_counter += new_size - pool->id_size;
313 			pool->id_size = new_size;
314 			pool->id_idx_msk = (new_size / 8) - 1;
315 			if (new_size >= SMB_IDPOOL_MAX_SIZE) {
316 				/* id -1 made unavailable */
317 				pool->id_pool[pool->id_idx_msk] = 0x80;
318 				pool->id_free_counter--;
319 				pool->id_max_free_counter--;
320 			}
321 			return (0);
322 		}
323 	}
324 	return (-1);
325 }
326 
327 /*
328  * smb_idpool_constructor
329  *
330  * This function initializes the pool structure provided.
331  */
332 int
333 smb_idpool_constructor(
334     smb_idpool_t	*pool)
335 {
336 
337 	ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);
338 
339 	pool->id_size = SMB_IDPOOL_MIN_SIZE;
340 	pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
341 	pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
342 	pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
343 	pool->id_bit = 0x02;
344 	pool->id_bit_idx = 1;
345 	pool->id_idx = 0;
346 	pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
347 	    KM_SLEEP);
348 	bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
349 	/* id -1 (0xFFFF) is reserved in smb_idpool_increment at max size */
350 	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
351 	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
352 	pool->id_magic = SMB_IDPOOL_MAGIC;
353 	return (0);
354 }
355 
356 /*
357  * smb_idpool_destructor
358  *
359  * This function tears down and frees the resources associated with the
360  * pool provided.
361  */
362 void
363 smb_idpool_destructor(
364     smb_idpool_t	*pool)
365 {
366 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
367 	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
368 	pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
369 	mutex_destroy(&pool->id_mutex);
370 	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
371 }
372 
373 /*
374  * smb_idpool_alloc
375  *
376  * This function allocates an ID from the pool provided.
377  */
378 int
379 smb_idpool_alloc(
380     smb_idpool_t	*pool,
381     uint16_t		*id)
382 {
383 	uint32_t	i;
384 	uint8_t		bit;
385 	uint8_t		bit_idx;
386 	uint8_t		byte;
387 
388 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
389 
390 	mutex_enter(&pool->id_mutex);
391 	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
392 		mutex_exit(&pool->id_mutex);
393 		return (-1);
394 	}
395 
396 	i = pool->id_size;
397 	while (i) {
398 		bit = pool->id_bit;
399 		bit_idx = pool->id_bit_idx;
400 		byte = pool->id_pool[pool->id_idx];
401 		while (bit) {
402 			if (byte & bit) {
403 				bit = bit << 1;
404 				bit_idx++;
405 				continue;
406 			}
407 			pool->id_pool[pool->id_idx] |= bit;
408 			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
409 			pool->id_free_counter--;
410 			pool->id_bit = bit;
411 			pool->id_bit_idx = bit_idx;
412 			mutex_exit(&pool->id_mutex);
413 			return (0);
414 		}
415 		pool->id_bit = 1;
416 		pool->id_bit_idx = 0;
417 		pool->id_idx++;
418 		pool->id_idx &= pool->id_idx_msk;
419 		--i;
420 	}
421 	/*
422 	 * This section of code shouldn't be reached. If there are IDs
423 	 * available and none could be found there's a problem.
424 	 */
425 	ASSERT(0);
426 	mutex_exit(&pool->id_mutex);
427 	return (-1);
428 }
429 
430 /*
431  * smb_idpool_free
432  *
433  * This function frees the ID provided.
434  */
435 void
436 smb_idpool_free(
437     smb_idpool_t	*pool,
438     uint16_t		id)
439 {
440 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
441 	ASSERT(id != 0);
442 	ASSERT(id != 0xFFFF);
443 
444 	mutex_enter(&pool->id_mutex);
445 	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
446 		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
447 		pool->id_free_counter++;
448 		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
449 		mutex_exit(&pool->id_mutex);
450 		return;
451 	}
452 	/* Freeing a free ID. */
453 	ASSERT(0);
454 	mutex_exit(&pool->id_mutex);
455 }
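/*
 * Illustrative sketch (not part of the original source): typical life
 * cycle of an ID pool.  IDs 0 and 0xFFFF are never handed out, so any
 * allocated value lies in [1 .. 0xFFFE].
 *
 *	smb_idpool_t	pool;
 *	uint16_t	id;
 *
 *	(void) smb_idpool_constructor(&pool);
 *	if (smb_idpool_alloc(&pool, &id) == 0) {
 *		use id, then return it to the pool
 *		smb_idpool_free(&pool, id);
 *	}
 *	smb_idpool_destructor(&pool);
 */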
456 
457 /*
458  * Initialize the llist delete queue object cache.
459  */
460 void
461 smb_llist_init(void)
462 {
463 	if (smb_llist_initialized)
464 		return;
465 
466 	smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
467 	    sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
468 
469 	smb_llist_initialized = B_TRUE;
470 }
471 
472 /*
473  * Destroy the llist delete queue object cache.
474  */
475 void
476 smb_llist_fini(void)
477 {
478 	if (!smb_llist_initialized)
479 		return;
480 
481 	kmem_cache_destroy(smb_dtor_cache);
482 	smb_llist_initialized = B_FALSE;
483 }
484 
485 /*
486  * smb_llist_constructor
487  *
488  * This function initializes a locked list.
489  */
490 void
491 smb_llist_constructor(
492     smb_llist_t	*ll,
493     size_t	size,
494     size_t	offset)
495 {
496 	rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
497 	mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
498 	list_create(&ll->ll_list, size, offset);
499 	list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
500 	    offsetof(smb_dtor_t, dt_lnd));
501 	ll->ll_count = 0;
502 	ll->ll_wrop = 0;
503 	ll->ll_deleteq_count = 0;
504 }
505 
506 /*
507  * Flush the delete queue and destroy a locked list.
508  */
509 void
510 smb_llist_destructor(
511     smb_llist_t	*ll)
512 {
513 	smb_llist_flush(ll);
514 
515 	ASSERT(ll->ll_count == 0);
516 	ASSERT(ll->ll_deleteq_count == 0);
517 
518 	rw_destroy(&ll->ll_lock);
519 	list_destroy(&ll->ll_list);
520 	list_destroy(&ll->ll_deleteq);
521 	mutex_destroy(&ll->ll_mutex);
522 }
523 
524 /*
525  * Post an object to the delete queue.  The delete queue will be processed
526  * during list exit or list destruction.  Objects are often posted for
527  * deletion during list iteration (while the list is locked) but that is
528  * not required, and an object can be posted at any time.
529  */
530 void
531 smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
532 {
533 	smb_dtor_t	*dtor;
534 
535 	ASSERT((object != NULL) && (dtorproc != NULL));
536 
537 	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
538 	bzero(dtor, sizeof (smb_dtor_t));
539 	dtor->dt_magic = SMB_DTOR_MAGIC;
540 	dtor->dt_object = object;
541 	dtor->dt_proc = dtorproc;
542 
543 	mutex_enter(&ll->ll_mutex);
544 	list_insert_tail(&ll->ll_deleteq, dtor);
545 	++ll->ll_deleteq_count;
546 	mutex_exit(&ll->ll_mutex);
547 }
548 
549 /*
550  * Exit the list lock and process the delete queue.
551  */
552 void
553 smb_llist_exit(smb_llist_t *ll)
554 {
555 	rw_exit(&ll->ll_lock);
556 	smb_llist_flush(ll);
557 }
558 
559 /*
560  * Flush the list delete queue.  The mutex is dropped across the destructor
561  * call in case this leads to additional objects being posted to the delete
562  * queue.
563  */
564 void
565 smb_llist_flush(smb_llist_t *ll)
566 {
567 	smb_dtor_t    *dtor;
568 
569 	mutex_enter(&ll->ll_mutex);
570 
571 	dtor = list_head(&ll->ll_deleteq);
572 	while (dtor != NULL) {
573 		SMB_DTOR_VALID(dtor);
574 		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
575 		list_remove(&ll->ll_deleteq, dtor);
576 		--ll->ll_deleteq_count;
577 		mutex_exit(&ll->ll_mutex);
578 
579 		dtor->dt_proc(dtor->dt_object);
580 
581 		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
582 		kmem_cache_free(smb_dtor_cache, dtor);
583 		mutex_enter(&ll->ll_mutex);
584 		dtor = list_head(&ll->ll_deleteq);
585 	}
586 
587 	mutex_exit(&ll->ll_mutex);
588 }
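/*
 * Illustrative sketch (not part of the original source): deleting objects
 * while iterating a locked list.  smb_llist_enter(), smb_llist_head() and
 * smb_llist_next() are the usual smbsrv accessors assumed to exist
 * elsewhere; obj_is_expired() and obj_destructor() are hypothetical.
 *
 *	void *obj;
 *
 *	smb_llist_enter(ll, RW_READER);
 *	for (obj = smb_llist_head(ll); obj != NULL;
 *	    obj = smb_llist_next(ll, obj)) {
 *		if (obj_is_expired(obj))
 *			smb_llist_post(ll, obj, obj_destructor);
 *	}
 *	smb_llist_exit(ll);	drops the lock, then flushes the delete
 *				queue, which is where obj_destructor() runs
 */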
589 
590 /*
591  * smb_llist_upgrade
592  *
593  * This function tries to upgrade the lock of the locked list. It assumes the
594  * lock has already been entered in RW_READER mode. It first tries using the
595  * Solaris function rw_tryupgrade(). If that call fails, the lock is released
596  * and reentered in RW_WRITER mode. In that case a window is opened during
597  * which the contents of the list may have changed. The return code indicates
598  * whether or not the list was modified while the lock was dropped.
599  */
600 int
601 smb_llist_upgrade(smb_llist_t *ll)
602 {
603 	uint64_t	wrop;
604 
605 	if (rw_tryupgrade(&ll->ll_lock) != 0) {
606 		return (0);
607 	}
608 	wrop = ll->ll_wrop;
609 	rw_exit(&ll->ll_lock);
610 	rw_enter(&ll->ll_lock, RW_WRITER);
611 	return (wrop != ll->ll_wrop);
612 }
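/*
 * Illustrative sketch (not part of the original source): a caller that
 * upgrades the list lock must re-validate its state whenever
 * smb_llist_upgrade() reports that the lock was dropped.
 * smb_llist_enter() is the assumed accessor; find_node() is a
 * hypothetical lookup.
 *
 *	smb_llist_enter(ll, RW_READER);
 *	node = find_node(ll);
 *	if (smb_llist_upgrade(ll) != 0) {
 *		the list changed while the lock was dropped;
 *		node may be stale, so look it up again
 *		node = find_node(ll);
 *	}
 *	if (node != NULL)
 *		smb_llist_remove(ll, node);
 *	smb_llist_exit(ll);
 */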
613 
614 /*
615  * smb_llist_insert_head
616  *
617  * This function inserts the object passed at the beginning of the list. This
618  * function assumes the lock of the list has already been entered.
619  */
620 void
621 smb_llist_insert_head(
622     smb_llist_t	*ll,
623     void	*obj)
624 {
625 	list_insert_head(&ll->ll_list, obj);
626 	++ll->ll_wrop;
627 	++ll->ll_count;
628 }
629 
630 /*
631  * smb_llist_insert_tail
632  *
633  * This function appends the object passed to the list. This function assumes
634  * the lock of the list has already been entered.
635  *
636  */
637 void
638 smb_llist_insert_tail(
639     smb_llist_t	*ll,
640     void	*obj)
641 {
642 	list_insert_tail(&ll->ll_list, obj);
643 	++ll->ll_wrop;
644 	++ll->ll_count;
645 }
646 
647 /*
648  * smb_llist_remove
649  *
650  * This function removes the object passed from the list. This function assumes
651  * the lock of the list has already been entered.
652  */
653 void
654 smb_llist_remove(
655     smb_llist_t	*ll,
656     void	*obj)
657 {
658 	list_remove(&ll->ll_list, obj);
659 	++ll->ll_wrop;
660 	--ll->ll_count;
661 }
662 
663 /*
664  * smb_llist_get_count
665  *
666  * This function returns the number of elements in the specified list.
667  */
668 uint32_t
669 smb_llist_get_count(
670     smb_llist_t *ll)
671 {
672 	return (ll->ll_count);
673 }
674 
675 /*
676  * smb_slist_constructor
677  *
678  * Synchronized list constructor.
679  */
680 void
681 smb_slist_constructor(
682     smb_slist_t	*sl,
683     size_t	size,
684     size_t	offset)
685 {
686 	mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
687 	cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
688 	list_create(&sl->sl_list, size, offset);
689 	sl->sl_count = 0;
690 	sl->sl_waiting = B_FALSE;
691 }
692 
693 /*
694  * smb_slist_destructor
695  *
696  * Synchronized list destructor.
697  */
698 void
699 smb_slist_destructor(
700     smb_slist_t	*sl)
701 {
702 	VERIFY(sl->sl_count == 0);
703 
704 	mutex_destroy(&sl->sl_mutex);
705 	cv_destroy(&sl->sl_cv);
706 	list_destroy(&sl->sl_list);
707 }
708 
709 /*
710  * smb_slist_insert_head
711  *
712  * This function inserts the object passed at the beginning of the list.
713  */
714 void
715 smb_slist_insert_head(
716     smb_slist_t	*sl,
717     void	*obj)
718 {
719 	mutex_enter(&sl->sl_mutex);
720 	list_insert_head(&sl->sl_list, obj);
721 	++sl->sl_count;
722 	mutex_exit(&sl->sl_mutex);
723 }
724 
725 /*
726  * smb_slist_insert_tail
727  *
728  * This function appends the object passed to the list.
729  */
730 void
731 smb_slist_insert_tail(
732     smb_slist_t	*sl,
733     void	*obj)
734 {
735 	mutex_enter(&sl->sl_mutex);
736 	list_insert_tail(&sl->sl_list, obj);
737 	++sl->sl_count;
738 	mutex_exit(&sl->sl_mutex);
739 }
740 
741 /*
742  * smb_slist_remove
743  *
744  * This function removes the object passed by the caller from the list.
745  */
746 void
747 smb_slist_remove(
748     smb_slist_t	*sl,
749     void	*obj)
750 {
751 	mutex_enter(&sl->sl_mutex);
752 	list_remove(&sl->sl_list, obj);
753 	if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
754 		sl->sl_waiting = B_FALSE;
755 		cv_broadcast(&sl->sl_cv);
756 	}
757 	mutex_exit(&sl->sl_mutex);
758 }
759 
760 /*
761  * smb_slist_move_tail
762  *
763  * This function transfers all the contents of the synchronized list to the
764  * list_t provided. It returns the number of objects transferred.
765  */
766 uint32_t
767 smb_slist_move_tail(
768     list_t	*lst,
769     smb_slist_t	*sl)
770 {
771 	uint32_t	rv;
772 
773 	mutex_enter(&sl->sl_mutex);
774 	rv = sl->sl_count;
775 	if (sl->sl_count) {
776 		list_move_tail(lst, &sl->sl_list);
777 		sl->sl_count = 0;
778 		if (sl->sl_waiting) {
779 			sl->sl_waiting = B_FALSE;
780 			cv_broadcast(&sl->sl_cv);
781 		}
782 	}
783 	mutex_exit(&sl->sl_mutex);
784 	return (rv);
785 }
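/*
 * Illustrative sketch (not part of the original source): draining a
 * synchronized list into a private list_t so the nodes can be processed
 * without holding sl_mutex.  my_node_t and its n_lnd link field are
 * hypothetical.
 *
 *	list_t		local;
 *	my_node_t	*node;
 *
 *	list_create(&local, sizeof (my_node_t),
 *	    offsetof(my_node_t, n_lnd));
 *	if (smb_slist_move_tail(&local, sl) != 0) {
 *		while ((node = list_remove_head(&local)) != NULL)
 *			process(node);
 *	}
 *	list_destroy(&local);
 */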
786 
787 /*
788  * smb_slist_obj_move
789  *
790  * This function moves an object from one list to the end of the other list. It
791  * assumes the mutex of each list has been entered.
792  */
793 void
794 smb_slist_obj_move(
795     smb_slist_t	*dst,
796     smb_slist_t	*src,
797     void	*obj)
798 {
799 	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
800 	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);
801 
802 	list_remove(&src->sl_list, obj);
803 	list_insert_tail(&dst->sl_list, obj);
804 	dst->sl_count++;
805 	src->sl_count--;
806 	if ((src->sl_count == 0) && (src->sl_waiting)) {
807 		src->sl_waiting = B_FALSE;
808 		cv_broadcast(&src->sl_cv);
809 	}
810 }
811 
812 /*
813  * smb_slist_wait_for_empty
814  *
815  * This function waits for a list to be emptied.
816  */
817 void
818 smb_slist_wait_for_empty(
819     smb_slist_t	*sl)
820 {
821 	mutex_enter(&sl->sl_mutex);
822 	while (sl->sl_count) {
823 		sl->sl_waiting = B_TRUE;
824 		cv_wait(&sl->sl_cv, &sl->sl_mutex);
825 	}
826 	mutex_exit(&sl->sl_mutex);
827 }
828 
829 /*
830  * smb_slist_exit
831  *
832  * This function exits the mutex of the list and signals the condition variable
833  * if the list is empty.
834  */
835 void
836 smb_slist_exit(smb_slist_t *sl)
837 {
838 	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
839 		sl->sl_waiting = B_FALSE;
840 		cv_broadcast(&sl->sl_cv);
841 	}
842 	mutex_exit(&sl->sl_mutex);
843 }
844 
845 /*
846  * smb_thread_entry_point
847  *
848  * Common entry point for all the threads created through smb_thread_start.
849  * The state of the thread is set to "running" at the beginning and moved to
850  * "exiting" just before calling thread_exit(). The condition variable is
851  * also signaled.
852  */
853 static void
854 smb_thread_entry_point(
855     smb_thread_t	*thread)
856 {
857 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
858 	mutex_enter(&thread->sth_mtx);
859 	ASSERT(thread->sth_state == SMB_THREAD_STATE_STARTING);
860 	thread->sth_th = curthread;
861 	thread->sth_did = thread->sth_th->t_did;
862 
863 	if (!thread->sth_kill) {
864 		thread->sth_state = SMB_THREAD_STATE_RUNNING;
865 		cv_signal(&thread->sth_cv);
866 		mutex_exit(&thread->sth_mtx);
867 		thread->sth_ep(thread, thread->sth_ep_arg);
868 		mutex_enter(&thread->sth_mtx);
869 	}
870 	thread->sth_th = NULL;
871 	thread->sth_state = SMB_THREAD_STATE_EXITING;
872 	cv_broadcast(&thread->sth_cv);
873 	mutex_exit(&thread->sth_mtx);
874 	thread_exit();
875 }
876 
877 /*
878  * smb_thread_init
879  */
880 void
881 smb_thread_init(
882     smb_thread_t	*thread,
883     char		*name,
884     smb_thread_ep_t	ep,
885     void		*ep_arg,
886     smb_thread_aw_t	aw,
887     void		*aw_arg)
888 {
889 	ASSERT(thread->sth_magic != SMB_THREAD_MAGIC);
890 
891 	bzero(thread, sizeof (*thread));
892 
893 	(void) strlcpy(thread->sth_name, name, sizeof (thread->sth_name));
894 	thread->sth_ep = ep;
895 	thread->sth_ep_arg = ep_arg;
896 	thread->sth_aw = aw;
897 	thread->sth_aw_arg = aw_arg;
898 	thread->sth_state = SMB_THREAD_STATE_EXITED;
899 	mutex_init(&thread->sth_mtx, NULL, MUTEX_DEFAULT, NULL);
900 	cv_init(&thread->sth_cv, NULL, CV_DEFAULT, NULL);
901 	thread->sth_magic = SMB_THREAD_MAGIC;
902 }
903 
904 /*
905  * smb_thread_destroy
906  */
907 void
908 smb_thread_destroy(
909     smb_thread_t	*thread)
910 {
911 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
912 	ASSERT(thread->sth_state == SMB_THREAD_STATE_EXITED);
913 	thread->sth_magic = 0;
914 	mutex_destroy(&thread->sth_mtx);
915 	cv_destroy(&thread->sth_cv);
916 }
917 
918 /*
919  * smb_thread_start
920  *
921  * This function starts a thread with the parameters provided. It waits until
922  * the state of the thread has been moved to running.
923  */
924 /*ARGSUSED*/
925 int
926 smb_thread_start(
927     smb_thread_t	*thread)
928 {
929 	int		rc = 0;
930 	kthread_t	*tmpthread;
931 
932 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
933 
934 	mutex_enter(&thread->sth_mtx);
935 	switch (thread->sth_state) {
936 	case SMB_THREAD_STATE_EXITED:
937 		thread->sth_state = SMB_THREAD_STATE_STARTING;
938 		mutex_exit(&thread->sth_mtx);
939 		tmpthread = thread_create(NULL, 0, smb_thread_entry_point,
940 		    thread, 0, &p0, TS_RUN, minclsyspri);
941 		ASSERT(tmpthread != NULL);
942 		mutex_enter(&thread->sth_mtx);
943 		while (thread->sth_state == SMB_THREAD_STATE_STARTING)
944 			cv_wait(&thread->sth_cv, &thread->sth_mtx);
945 		if (thread->sth_state != SMB_THREAD_STATE_RUNNING)
946 			rc = -1;
947 		break;
948 	default:
949 		ASSERT(0);
950 		rc = -1;
951 		break;
952 	}
953 	mutex_exit(&thread->sth_mtx);
954 	return (rc);
955 }
956 
957 /*
958  * smb_thread_stop
959  *
960  * This function signals a thread to kill itself and waits until the "exiting"
961  * state has been reached.
962  */
963 void
964 smb_thread_stop(
965     smb_thread_t	*thread)
966 {
967 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
968 
969 	mutex_enter(&thread->sth_mtx);
970 	switch (thread->sth_state) {
971 	case SMB_THREAD_STATE_RUNNING:
972 	case SMB_THREAD_STATE_STARTING:
973 		if (!thread->sth_kill) {
974 			thread->sth_kill = B_TRUE;
975 			if (thread->sth_aw)
976 				thread->sth_aw(thread, thread->sth_aw_arg);
977 			cv_broadcast(&thread->sth_cv);
978 			while (thread->sth_state != SMB_THREAD_STATE_EXITING)
979 				cv_wait(&thread->sth_cv, &thread->sth_mtx);
980 			mutex_exit(&thread->sth_mtx);
981 			thread_join(thread->sth_did);
982 			mutex_enter(&thread->sth_mtx);
983 			thread->sth_state = SMB_THREAD_STATE_EXITED;
984 			thread->sth_did = 0;
985 			thread->sth_kill = B_FALSE;
986 			cv_broadcast(&thread->sth_cv);
987 			break;
988 		}
989 		/*FALLTHRU*/
990 
991 	case SMB_THREAD_STATE_EXITING:
992 		if (thread->sth_kill) {
993 			while (thread->sth_state != SMB_THREAD_STATE_EXITED)
994 				cv_wait(&thread->sth_cv, &thread->sth_mtx);
995 		} else {
996 			thread->sth_state = SMB_THREAD_STATE_EXITED;
997 			thread->sth_did = 0;
998 		}
999 		break;
1000 
1001 	case SMB_THREAD_STATE_EXITED:
1002 		break;
1003 
1004 	default:
1005 		ASSERT(0);
1006 		break;
1007 	}
1008 	mutex_exit(&thread->sth_mtx);
1009 }
1010 
1011 /*
1012  * smb_thread_signal
1013  *
1014  * This function signals a thread.
1015  */
1016 void
1017 smb_thread_signal(
1018     smb_thread_t	*thread)
1019 {
1020 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1021 
1022 	mutex_enter(&thread->sth_mtx);
1023 	switch (thread->sth_state) {
1024 	case SMB_THREAD_STATE_RUNNING:
1025 		if (thread->sth_aw)
1026 			thread->sth_aw(thread, thread->sth_aw_arg);
1027 		cv_signal(&thread->sth_cv);
1028 		break;
1029 
1030 	default:
1031 		break;
1032 	}
1033 	mutex_exit(&thread->sth_mtx);
1034 }
1035 
1036 boolean_t
1037 smb_thread_continue(smb_thread_t *thread)
1038 {
1039 	boolean_t result;
1040 
1041 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1042 
1043 	mutex_enter(&thread->sth_mtx);
1044 	result = smb_thread_continue_timedwait_locked(thread, 0);
1045 	mutex_exit(&thread->sth_mtx);
1046 
1047 	return (result);
1048 }
1049 
1050 boolean_t
1051 smb_thread_continue_nowait(smb_thread_t *thread)
1052 {
1053 	boolean_t result;
1054 
1055 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1056 
1057 	mutex_enter(&thread->sth_mtx);
1058 	/*
1059 	 * Setting ticks=-1 requests a non-blocking check.  We will
1060 	 * still block if the thread is in "suspend" state.
1061 	 */
1062 	result = smb_thread_continue_timedwait_locked(thread, -1);
1063 	mutex_exit(&thread->sth_mtx);
1064 
1065 	return (result);
1066 }
1067 
1068 boolean_t
1069 smb_thread_continue_timedwait(smb_thread_t *thread, int seconds)
1070 {
1071 	boolean_t result;
1072 
1073 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1074 
1075 	mutex_enter(&thread->sth_mtx);
1076 	result = smb_thread_continue_timedwait_locked(thread,
1077 	    SEC_TO_TICK(seconds));
1078 	mutex_exit(&thread->sth_mtx);
1079 
1080 	return (result);
1081 }
1082 
1083 /*
1084  * smb_thread_continue_timedwait_locked
1085  *
1086  * Internal only.  Ticks == -1 means don't block; ticks == 0 means wait
1087  * indefinitely.
1088  */
1089 static boolean_t
1090 smb_thread_continue_timedwait_locked(smb_thread_t *thread, int ticks)
1091 {
1092 	boolean_t	result;
1093 
1094 	/* -1 means don't block */
1095 	if (ticks != -1 && !thread->sth_kill) {
1096 		if (ticks == 0) {
1097 			cv_wait(&thread->sth_cv, &thread->sth_mtx);
1098 		} else {
1099 			(void) cv_reltimedwait(&thread->sth_cv,
1100 			    &thread->sth_mtx, (clock_t)ticks, TR_CLOCK_TICK);
1101 		}
1102 	}
1103 	result = (thread->sth_kill == 0);
1104 
1105 	return (result);
1106 }
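/*
 * Illustrative sketch (not part of the original source): the canonical
 * worker built on this interface.  my_worker() and my_arg are
 * hypothetical.
 *
 *	static void
 *	my_worker(smb_thread_t *thread, void *arg)
 *	{
 *		while (smb_thread_continue(thread)) {
 *			handle one unit of work; smb_thread_signal()
 *			wakes this loop up, smb_thread_stop() ends it
 *		}
 *	}
 *
 *	smb_thread_init(&thr, "my_worker", my_worker, my_arg, NULL, NULL);
 *	(void) smb_thread_start(&thr);
 *	...
 *	smb_thread_stop(&thr);
 *	smb_thread_destroy(&thr);
 */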
1107 
1108 void
1109 smb_thread_set_awaken(smb_thread_t *thread, smb_thread_aw_t new_aw_fn,
1110     void *new_aw_arg)
1111 {
1112 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1113 
1114 	mutex_enter(&thread->sth_mtx);
1115 	thread->sth_aw = new_aw_fn;
1116 	thread->sth_aw_arg = new_aw_arg;
1117 	mutex_exit(&thread->sth_mtx);
1118 }
1119 
1120 /*
1121  * smb_rwx_init
1122  */
1123 void
1124 smb_rwx_init(
1125     smb_rwx_t	*rwx)
1126 {
1127 	bzero(rwx, sizeof (smb_rwx_t));
1128 	cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
1129 	mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
1130 	rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
1131 }
1132 
1133 /*
1134  * smb_rwx_destroy
1135  */
1136 void
1137 smb_rwx_destroy(
1138     smb_rwx_t	*rwx)
1139 {
1140 	mutex_destroy(&rwx->rwx_mutex);
1141 	cv_destroy(&rwx->rwx_cv);
1142 	rw_destroy(&rwx->rwx_lock);
1143 }
1144 
1145 /*
1146  * smb_rwx_rwexit
1147  */
1148 void
1149 smb_rwx_rwexit(
1150     smb_rwx_t	*rwx)
1151 {
1152 	if (rw_write_held(&rwx->rwx_lock)) {
1153 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1154 		mutex_enter(&rwx->rwx_mutex);
1155 		if (rwx->rwx_waiting) {
1156 			rwx->rwx_waiting = B_FALSE;
1157 			cv_broadcast(&rwx->rwx_cv);
1158 		}
1159 		mutex_exit(&rwx->rwx_mutex);
1160 	}
1161 	rw_exit(&rwx->rwx_lock);
1162 }
1163 
1164 /*
1165  * smb_rwx_rwupgrade
1166  */
1167 krw_t
1168 smb_rwx_rwupgrade(
1169     smb_rwx_t	*rwx)
1170 {
1171 	if (rw_write_held(&rwx->rwx_lock)) {
1172 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1173 		return (RW_WRITER);
1174 	}
1175 	if (!rw_tryupgrade(&rwx->rwx_lock)) {
1176 		rw_exit(&rwx->rwx_lock);
1177 		rw_enter(&rwx->rwx_lock, RW_WRITER);
1178 	}
1179 	return (RW_READER);
1180 }
1181 
1182 /*
1183  * smb_rwx_rwdowngrade
1184  */
1185 void
1186 smb_rwx_rwdowngrade(
1187     smb_rwx_t	*rwx,
1188     krw_t	mode)
1189 {
1190 	ASSERT(rw_write_held(&rwx->rwx_lock));
1191 	ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1192 
1193 	if (mode == RW_WRITER) {
1194 		return;
1195 	}
1196 	ASSERT(mode == RW_READER);
1197 	mutex_enter(&rwx->rwx_mutex);
1198 	if (rwx->rwx_waiting) {
1199 		rwx->rwx_waiting = B_FALSE;
1200 		cv_broadcast(&rwx->rwx_cv);
1201 	}
1202 	mutex_exit(&rwx->rwx_mutex);
1203 	rw_downgrade(&rwx->rwx_lock);
1204 }
1205 
1206 /*
1207  * smb_rwx_wait
1208  *
1209  * This function assumes the smb_rwx lock was entered in RW_READER or RW_WRITER
1210  * mode. It will:
1211  *
1212  *	1) release the lock and save its current mode.
1213  *	2) wait until the condition variable is signaled. This can happen for
1214  *	   2 reasons: when a writer releases the lock or when the timeout (if
1215  *	   provided) expires.
1216  *	3) re-acquire the lock in the mode saved in (1).
1217  */
1218 int
1219 smb_rwx_rwwait(
1220     smb_rwx_t	*rwx,
1221     clock_t	timeout)
1222 {
1223 	int	rc = 1;	/* treat "no wait needed" as signaled */
1224 	krw_t	mode;
1225 
1226 	mutex_enter(&rwx->rwx_mutex);
1227 	rwx->rwx_waiting = B_TRUE;
1228 	mutex_exit(&rwx->rwx_mutex);
1229 
1230 	if (rw_write_held(&rwx->rwx_lock)) {
1231 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1232 		mode = RW_WRITER;
1233 	} else {
1234 		ASSERT(rw_read_held(&rwx->rwx_lock));
1235 		mode = RW_READER;
1236 	}
1237 	rw_exit(&rwx->rwx_lock);
1238 
1239 	mutex_enter(&rwx->rwx_mutex);
1240 	if (rwx->rwx_waiting) {
1241 		if (timeout == -1) {
1242 			rc = 1;
1243 			cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
1244 		} else {
1245 			rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
1246 			    timeout, TR_CLOCK_TICK);
1247 		}
1248 	}
1249 	mutex_exit(&rwx->rwx_mutex);
1250 
1251 	rw_enter(&rwx->rwx_lock, mode);
1252 	return (rc);
1253 }
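/*
 * Illustrative sketch (not part of the original source): waiting for a
 * state change protected by an smb_rwx.  smb_rwx_rwenter() is assumed to
 * be the usual wrapper around rw_enter(); the state field and READY value
 * are hypothetical.  smb_rwx_rwwait() returns -1 on timeout and a
 * positive value when signaled.
 *
 *	smb_rwx_rwenter(&obj->rwx, RW_READER);
 *	while (obj->state != READY) {
 *		if (smb_rwx_rwwait(&obj->rwx, SEC_TO_TICK(5)) == -1)
 *			break;			timed out
 *	}
 *	smb_rwx_rwexit(&obj->rwx);
 */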
1254 
1255 /*
1256  * SMB ID mapping
1257  *
1258  * The Solaris ID mapping service (aka Winchester) works with domain SIDs
1259  * and RIDs, where domain SIDs are in string format. The CIFS service works
1260  * with binary SIDs understandable by CIFS clients. A layer of SMB ID
1261  * mapping functions is implemented to hide the SID conversion details
1262  * and also to hide the handling of arrays of batch mapping requests.
1263  *
1264  * IMPORTANT NOTE: The Winchester API requires a zone. Because the CIFS
1265  * server currently runs only in the global zone, the global zone is
1266  * specified. This needs to be fixed when the CIFS server supports zones.
1267  */
1268 
1269 static int smb_idmap_batch_binsid(smb_idmap_batch_t *sib);
1270 
1271 /*
1272  * smb_idmap_getid
1273  *
1274  * Maps the given Windows SID to a Solaris ID using the
1275  * simple mapping API.
1276  */
1277 idmap_stat
1278 smb_idmap_getid(smb_sid_t *sid, uid_t *id, int *idtype)
1279 {
1280 	smb_idmap_t sim;
1281 	char sidstr[SMB_SID_STRSZ];
1282 
1283 	smb_sid_tostr(sid, sidstr);
1284 	if (smb_sid_splitstr(sidstr, &sim.sim_rid) != 0)
1285 		return (IDMAP_ERR_SID);
1286 	sim.sim_domsid = sidstr;
1287 	sim.sim_id = id;
1288 
1289 	switch (*idtype) {
1290 	case SMB_IDMAP_USER:
1291 		sim.sim_stat = kidmap_getuidbysid(global_zone, sim.sim_domsid,
1292 		    sim.sim_rid, sim.sim_id);
1293 		break;
1294 
1295 	case SMB_IDMAP_GROUP:
1296 		sim.sim_stat = kidmap_getgidbysid(global_zone, sim.sim_domsid,
1297 		    sim.sim_rid, sim.sim_id);
1298 		break;
1299 
1300 	case SMB_IDMAP_UNKNOWN:
1301 		sim.sim_stat = kidmap_getpidbysid(global_zone, sim.sim_domsid,
1302 		    sim.sim_rid, sim.sim_id, &sim.sim_idtype);
1303 		break;
1304 
1305 	default:
1306 		ASSERT(0);
1307 		return (IDMAP_ERR_ARG);
1308 	}
1309 
1310 	*idtype = sim.sim_idtype;
1311 
1312 	return (sim.sim_stat);
1313 }
1314 
1315 /*
1316  * smb_idmap_getsid
1317  *
1318  * Maps the given Solaris ID to a Windows SID using the
1319  * simple mapping API.
1320  */
1321 idmap_stat
1322 smb_idmap_getsid(uid_t id, int idtype, smb_sid_t **sid)
1323 {
1324 	smb_idmap_t sim;
1325 
1326 	switch (idtype) {
1327 	case SMB_IDMAP_USER:
1328 		sim.sim_stat = kidmap_getsidbyuid(global_zone, id,
1329 		    (const char **)&sim.sim_domsid, &sim.sim_rid);
1330 		break;
1331 
1332 	case SMB_IDMAP_GROUP:
1333 		sim.sim_stat = kidmap_getsidbygid(global_zone, id,
1334 		    (const char **)&sim.sim_domsid, &sim.sim_rid);
1335 		break;
1336 
1337 	case SMB_IDMAP_EVERYONE:
1338 		/* Everyone S-1-1-0 */
1339 		sim.sim_domsid = "S-1-1";
1340 		sim.sim_rid = 0;
1341 		sim.sim_stat = IDMAP_SUCCESS;
1342 		break;
1343 
1344 	default:
1345 		ASSERT(0);
1346 		return (IDMAP_ERR_ARG);
1347 	}
1348 
1349 	if (sim.sim_stat != IDMAP_SUCCESS)
1350 		return (sim.sim_stat);
1351 
1352 	if (sim.sim_domsid == NULL)
1353 		return (IDMAP_ERR_NOMAPPING);
1354 
1355 	sim.sim_sid = smb_sid_fromstr(sim.sim_domsid);
1356 	if (sim.sim_sid == NULL)
1357 		return (IDMAP_ERR_INTERNAL);
1358 
1359 	*sid = smb_sid_splice(sim.sim_sid, sim.sim_rid);
1360 	smb_sid_free(sim.sim_sid);
1361 	if (*sid == NULL)
1362 		sim.sim_stat = IDMAP_ERR_INTERNAL;
1363 
1364 	return (sim.sim_stat);
1365 }
1366 
1367 /*
1368  * smb_idmap_batch_create
1369  *
1370  * Creates and initializes the context for batch ID mapping.
1371  */
1372 idmap_stat
1373 smb_idmap_batch_create(smb_idmap_batch_t *sib, uint16_t nmap, int flags)
1374 {
1375 	ASSERT(sib);
1376 
1377 	bzero(sib, sizeof (smb_idmap_batch_t));
1378 
1379 	sib->sib_idmaph = kidmap_get_create(global_zone);
1380 
1381 	sib->sib_flags = flags;
1382 	sib->sib_nmap = nmap;
1383 	sib->sib_size = nmap * sizeof (smb_idmap_t);
1384 	sib->sib_maps = kmem_zalloc(sib->sib_size, KM_SLEEP);
1385 
1386 	return (IDMAP_SUCCESS);
1387 }
1388 
1389 /*
1390  * smb_idmap_batch_destroy
1391  *
1392  * Frees the batch ID mapping context.
1393  * If the ID mapping is Solaris -> Windows it frees the memory
1394  * allocated for the binary SIDs.
1395  */
1396 void
1397 smb_idmap_batch_destroy(smb_idmap_batch_t *sib)
1398 {
1399 	char *domsid;
1400 	int i;
1401 
1402 	ASSERT(sib);
1403 	ASSERT(sib->sib_maps);
1404 
1405 	if (sib->sib_idmaph)
1406 		kidmap_get_destroy(sib->sib_idmaph);
1407 
1408 	if (sib->sib_flags & SMB_IDMAP_ID2SID) {
1409 		/*
1410 		 * SIDs are allocated only when mapping
1411 		 * UID/GID to SIDs
1412 		 */
1413 		for (i = 0; i < sib->sib_nmap; i++)
1414 			smb_sid_free(sib->sib_maps[i].sim_sid);
1415 	} else if (sib->sib_flags & SMB_IDMAP_SID2ID) {
1416 		/*
1417 		 * SID prefixes are allocated only when mapping
1418 		 * SIDs to UID/GID
1419 		 */
1420 		for (i = 0; i < sib->sib_nmap; i++) {
1421 			domsid = sib->sib_maps[i].sim_domsid;
1422 			if (domsid)
1423 				smb_mem_free(domsid);
1424 		}
1425 	}
1426 
1427 	if (sib->sib_size && sib->sib_maps)
1428 		kmem_free(sib->sib_maps, sib->sib_size);
1429 }
1430 
1431 /*
1432  * smb_idmap_batch_getid
1433  *
1434  * Queue a request to map the given SID to a UID or GID.
1435  *
1436  * sim->sim_id should point to the variable that is to hold the
1437  * returned UID/GID. This needs to be set up by the caller of
1438  * this function.
1439  *
1440  * If requested ID type is known, it's passed as 'idtype',
1441  * if it's unknown it'll be returned in sim->sim_idtype.
1442  */
1443 idmap_stat
1444 smb_idmap_batch_getid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
1445     smb_sid_t *sid, int idtype)
1446 {
1447 	char strsid[SMB_SID_STRSZ];
1448 	idmap_stat idm_stat;
1449 
1450 	ASSERT(idmaph);
1451 	ASSERT(sim);
1452 	ASSERT(sid);
1453 
1454 	smb_sid_tostr(sid, strsid);
1455 	if (smb_sid_splitstr(strsid, &sim->sim_rid) != 0)
1456 		return (IDMAP_ERR_SID);
1457 	sim->sim_domsid = smb_mem_strdup(strsid);
1458 
1459 	switch (idtype) {
1460 	case SMB_IDMAP_USER:
1461 		idm_stat = kidmap_batch_getuidbysid(idmaph, sim->sim_domsid,
1462 		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
1463 		break;
1464 
1465 	case SMB_IDMAP_GROUP:
1466 		idm_stat = kidmap_batch_getgidbysid(idmaph, sim->sim_domsid,
1467 		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
1468 		break;
1469 
1470 	case SMB_IDMAP_UNKNOWN:
1471 		idm_stat = kidmap_batch_getpidbysid(idmaph, sim->sim_domsid,
1472 		    sim->sim_rid, sim->sim_id, &sim->sim_idtype,
1473 		    &sim->sim_stat);
1474 		break;
1475 
1476 	default:
1477 		ASSERT(0);
1478 		return (IDMAP_ERR_ARG);
1479 	}
1480 
1481 	return (idm_stat);
1482 }
1483 
1484 /*
1485  * smb_idmap_batch_getsid
1486  *
1487  * Queue a request to map the given UID/GID to a SID.
1488  *
1489  * sim->sim_domsid and sim->sim_rid will contain the mapping
1490  * result upon successful process of the batched request.
1491  */
1492 idmap_stat
1493 smb_idmap_batch_getsid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
1494     uid_t id, int idtype)
1495 {
1496 	idmap_stat idm_stat;
1497 
1498 	switch (idtype) {
1499 	case SMB_IDMAP_USER:
1500 		idm_stat = kidmap_batch_getsidbyuid(idmaph, id,
1501 		    (const char **)&sim->sim_domsid, &sim->sim_rid,
1502 		    &sim->sim_stat);
1503 		break;
1504 
1505 	case SMB_IDMAP_GROUP:
1506 		idm_stat = kidmap_batch_getsidbygid(idmaph, id,
1507 		    (const char **)&sim->sim_domsid, &sim->sim_rid,
1508 		    &sim->sim_stat);
1509 		break;
1510 
1511 	case SMB_IDMAP_OWNERAT:
1512 		/* Current Owner S-1-5-32-766 */
1513 		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
1514 		sim->sim_rid = SECURITY_CURRENT_OWNER_RID;
1515 		sim->sim_stat = IDMAP_SUCCESS;
1516 		idm_stat = IDMAP_SUCCESS;
1517 		break;
1518 
1519 	case SMB_IDMAP_GROUPAT:
1520 		/* Current Group S-1-5-32-767 */
1521 		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
1522 		sim->sim_rid = SECURITY_CURRENT_GROUP_RID;
1523 		sim->sim_stat = IDMAP_SUCCESS;
1524 		idm_stat = IDMAP_SUCCESS;
1525 		break;
1526 
1527 	case SMB_IDMAP_EVERYONE:
1528 		/* Everyone S-1-1-0 */
1529 		sim->sim_domsid = NT_WORLD_AUTH_SIDSTR;
1530 		sim->sim_rid = 0;
1531 		sim->sim_stat = IDMAP_SUCCESS;
1532 		idm_stat = IDMAP_SUCCESS;
1533 		break;
1534 
1535 	default:
1536 		ASSERT(0);
1537 		return (IDMAP_ERR_ARG);
1538 	}
1539 
1540 	return (idm_stat);
1541 }
1542 
1543 /*
1544  * smb_idmap_batch_binsid
1545  *
1546  * Convert (domsid, rid) pairs to binary SIDs.
1547  *
1548  * Returns 0 if successful and non-zero upon failure.
1549  */
1550 static int
1551 smb_idmap_batch_binsid(smb_idmap_batch_t *sib)
1552 {
1553 	smb_sid_t *sid;
1554 	smb_idmap_t *sim;
1555 	int i;
1556 
1557 	if (sib->sib_flags & SMB_IDMAP_SID2ID)
1558 		/* This operation is not required */
1559 		return (0);
1560 
1561 	sim = sib->sib_maps;
1562 	for (i = 0; i < sib->sib_nmap; sim++, i++) {
1563 		ASSERT(sim->sim_domsid);
1564 		if (sim->sim_domsid == NULL)
1565 			return (1);
1566 
1567 		if ((sid = smb_sid_fromstr(sim->sim_domsid)) == NULL)
1568 			return (1);
1569 
1570 		sim->sim_sid = smb_sid_splice(sid, sim->sim_rid);
1571 		smb_sid_free(sid);
1572 	}
1573 
1574 	return (0);
1575 }
1576 
1577 /*
1578  * smb_idmap_batch_getmappings
1579  *
1580  * Triggers the ID mapping service to get the mappings for the
1581  * queued requests.
1582  *
1583  * Checks the result of all the queued requests.
1584  * If this is a Solaris -> Windows mapping it generates
1585  * binary SIDs from returned (domsid, rid) pairs.
1586  */
1587 idmap_stat
1588 smb_idmap_batch_getmappings(smb_idmap_batch_t *sib)
1589 {
1590 	idmap_stat idm_stat = IDMAP_SUCCESS;
1591 	int i;
1592 
1593 	idm_stat = kidmap_get_mappings(sib->sib_idmaph);
1594 	if (idm_stat != IDMAP_SUCCESS)
1595 		return (idm_stat);
1596 
1597 	/*
1598 	 * Check the status for all the queued requests
1599 	 */
1600 	for (i = 0; i < sib->sib_nmap; i++) {
1601 		if (sib->sib_maps[i].sim_stat != IDMAP_SUCCESS)
1602 			return (sib->sib_maps[i].sim_stat);
1603 	}
1604 
1605 	if (smb_idmap_batch_binsid(sib) != 0)
1606 		idm_stat = IDMAP_ERR_OTHER;
1607 
1608 	return (idm_stat);
1609 }
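/*
 * Illustrative sketch (not part of the original source): mapping an array
 * of SIDs to Solaris IDs with the batch interface.  nsid, sids[] and
 * ids[] are hypothetical.
 *
 *	smb_idmap_batch_t	sib;
 *	idmap_stat		stat;
 *	int			i;
 *
 *	stat = smb_idmap_batch_create(&sib, nsid, SMB_IDMAP_SID2ID);
 *	if (stat != IDMAP_SUCCESS)
 *		return (stat);
 *	for (i = 0; i < nsid; i++) {
 *		sib.sib_maps[i].sim_id = &ids[i];
 *		stat = smb_idmap_batch_getid(sib.sib_idmaph,
 *		    &sib.sib_maps[i], sids[i], SMB_IDMAP_UNKNOWN);
 *		if (stat != IDMAP_SUCCESS)
 *			break;
 *	}
 *	if (stat == IDMAP_SUCCESS)
 *		stat = smb_idmap_batch_getmappings(&sib);
 *	smb_idmap_batch_destroy(&sib);
 */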
1610 
1611 uint64_t
1612 smb_time_unix_to_nt(timestruc_t *unix_time)
1613 {
1614 	uint64_t nt_time;
1615 
1616 	if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
1617 		return (0);
1618 
1619 	nt_time = unix_time->tv_sec;
1620 	nt_time *= 10000000;  /* seconds to 100ns */
1621 	nt_time += unix_time->tv_nsec / 100;
1622 	return (nt_time + NT_TIME_BIAS);
1623 }
1624 
1625 void
1626 smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
1627 {
1628 	uint32_t seconds;
1629 
1630 	ASSERT(unix_time);
1631 
1632 	if ((nt_time == 0) || (nt_time == -1)) {
1633 		unix_time->tv_sec = 0;
1634 		unix_time->tv_nsec = 0;
1635 		return;
1636 	}
1637 
1638 	nt_time -= NT_TIME_BIAS;
1639 	seconds = nt_time / 10000000;
1640 	unix_time->tv_sec = seconds;
1641 	unix_time->tv_nsec = (nt_time  % 10000000) * 100;
1642 }
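/*
 * Worked example (not part of the original source), assuming NT_TIME_BIAS
 * is the usual offset between the NT epoch (1601-01-01) and the Unix
 * epoch (1970-01-01), i.e. 11,644,473,600 seconds expressed in 100ns
 * units:
 *
 *	Unix time 1000000000 s (2001-09-09 01:46:40 UTC)
 *	NT time   = 1000000000 * 10000000 + 11644473600 * 10000000
 *	          = 126444736000000000 (100ns intervals since 1601)
 */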
1643 
1644 /*
1645  * smb_time_gmt_to_local, smb_time_local_to_gmt
1646  *
1647  * Apply the gmt offset to convert between local time and gmt
1648  */
1649 int32_t
1650 smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt)
1651 {
1652 	if ((gmt == 0) || (gmt == -1))
1653 		return (0);
1654 
1655 	return (gmt - sr->sr_gmtoff);
1656 }
1657 
1658 int32_t
1659 smb_time_local_to_gmt(smb_request_t *sr, int32_t local)
1660 {
1661 	if ((local == 0) || (local == -1))
1662 		return (0);
1663 
1664 	return (local + sr->sr_gmtoff);
1665 }
1666 
1667 
1668 /*
1669  * smb_time_dos_to_unix
1670  *
1671  * Convert SMB_DATE & SMB_TIME values to a unix timestamp.
1672  *
1673  * A date/time field of 0 means that the server file system
1674  * assigned value need not be changed. The behaviour when the
1675  * date/time field is set to -1 is not documented but is
1676  * generally treated like 0.
1677  * If date or time is 0 or -1 the unix time is returned as 0
1678  * so that the caller can identify and handle this special case.
1679  */
1680 int32_t
1681 smb_time_dos_to_unix(int16_t date, int16_t time)
1682 {
1683 	struct tm	atm;
1684 
1685 	if (((date == 0) || (time == 0)) ||
1686 	    ((date == -1) || (time == -1))) {
1687 		return (0);
1688 	}
1689 
1690 	atm.tm_year = ((date >>  9) & 0x3F) + 80;
1691 	atm.tm_mon  = ((date >>  5) & 0x0F) - 1;
1692 	atm.tm_mday = ((date >>  0) & 0x1F);
1693 	atm.tm_hour = ((time >> 11) & 0x1F);
1694 	atm.tm_min  = ((time >>  5) & 0x3F);
1695 	atm.tm_sec  = ((time >>  0) & 0x1F) << 1;
1696 
1697 	return (smb_timegm(&atm));
1698 }
1699 
1700 void
1701 smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p)
1702 {
1703 	struct tm	atm;
1704 	int		i;
1705 	time_t		tmp_time;
1706 
1707 	if (ux_time == 0) {
1708 		*date_p = 0;
1709 		*time_p = 0;
1710 		return;
1711 	}
1712 
1713 	tmp_time = (time_t)ux_time;
1714 	(void) smb_gmtime_r(&tmp_time, &atm);
1715 
1716 	if (date_p) {
1717 		i = 0;
1718 		i += atm.tm_year - 80;
1719 		i <<= 4;
1720 		i += atm.tm_mon + 1;
1721 		i <<= 5;
1722 		i += atm.tm_mday;
1723 
1724 		*date_p = (short)i;
1725 	}
1726 	if (time_p) {
1727 		i = 0;
1728 		i += atm.tm_hour;
1729 		i <<= 6;
1730 		i += atm.tm_min;
1731 		i <<= 5;
1732 		i += atm.tm_sec >> 1;
1733 
1734 		*time_p = (short)i;
1735 	}
1736 }
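/*
 * Worked example (not part of the original source) of the DOS date/time
 * packing used above:
 *
 *	2000-01-01 12:30:00
 *	date = ((2000 - 1980) << 9) | (1 << 5) | 1  = 0x2821
 *	time = (12 << 11) | (30 << 5) | (0 >> 1)    = 0x63C0
 *
 * Note the 2-second granularity of the seconds field: tm_sec is stored
 * as tm_sec >> 1 and recovered as (time & 0x1F) << 1.
 */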
1737 
1738 
1739 /*
1740  * smb_gmtime_r
1741  *
1742  * Thread-safe version of smb_gmtime. Returns a null pointer if either
1743  * input parameter is a null pointer. Otherwise returns a pointer
1744  * to result.
1745  *
1746  * Day of the week calculation: the Epoch was a Thursday.
1747  *
1748  * There are no timezone corrections so tm_isdst and tm_gmtoff are
1749  * always zero, and the zone is always WET.
1750  */
1751 struct tm *
1752 smb_gmtime_r(time_t *clock, struct tm *result)
1753 {
1754 	time_t tsec;
1755 	int year;
1756 	int month;
1757 	int sec_per_month;
1758 
1759 	if (clock == 0 || result == 0)
1760 		return (0);
1761 
1762 	bzero(result, sizeof (struct tm));
1763 	tsec = *clock;
1764 	tsec -= tzh_leapcnt;
1765 
1766 	result->tm_wday = tsec / SECSPERDAY;
1767 	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;
1768 
1769 	year = EPOCH_YEAR;
1770 	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
1771 	    (SECSPERDAY * DAYSPERNYEAR))) {
1772 		if (isleap(year))
1773 			tsec -= SECSPERDAY * DAYSPERLYEAR;
1774 		else
1775 			tsec -= SECSPERDAY * DAYSPERNYEAR;
1776 
1777 		++year;
1778 	}
1779 
1780 	result->tm_year = year - TM_YEAR_BASE;
1781 	result->tm_yday = tsec / SECSPERDAY;
1782 
1783 	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
1784 		sec_per_month = days_in_month[month] * SECSPERDAY;
1785 
1786 		if (month == TM_FEBRUARY && isleap(year))
1787 			sec_per_month += SECSPERDAY;
1788 
1789 		if (tsec < sec_per_month)
1790 			break;
1791 
1792 		tsec -= sec_per_month;
1793 	}
1794 
1795 	result->tm_mon = month;
1796 	result->tm_mday = (tsec / SECSPERDAY) + 1;
1797 	tsec %= SECSPERDAY;
1798 	result->tm_sec = tsec % 60;
1799 	tsec /= 60;
1800 	result->tm_min = tsec % 60;
1801 	tsec /= 60;
1802 	result->tm_hour = (int)tsec;
1803 
1804 	return (result);
1805 }
1806 
1807 
1808 /*
1809  * smb_timegm
1810  *
1811  * Converts the broken-down time in tm to a time value, i.e. the number
1812  * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
1813  * not a POSIX or ANSI function. Per the man page, the input values of
1814  * tm_wday and tm_yday are ignored and, as the input data is assumed to
1815  * represent GMT, we force tm_isdst and tm_gmtoff to 0.
1816  *
1817  * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
1818  * and tm_yday, and bring the other fields within normal range. I don't
1819  * think this is really how it should be done but it's convenient for
1820  * now.
1821  */
1822 time_t
1823 smb_timegm(struct tm *tm)
1824 {
1825 	time_t tsec;
1826 	int dd;
1827 	int mm;
1828 	int yy;
1829 	int year;
1830 
1831 	if (tm == 0)
1832 		return (-1);
1833 
1834 	year = tm->tm_year + TM_YEAR_BASE;
1835 	tsec = tzh_leapcnt;
1836 
1837 	for (yy = EPOCH_YEAR; yy < year; ++yy) {
1838 		if (isleap(yy))
1839 			tsec += SECSPERDAY * DAYSPERLYEAR;
1840 		else
1841 			tsec += SECSPERDAY * DAYSPERNYEAR;
1842 	}
1843 
1844 	for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
1845 		dd = days_in_month[mm] * SECSPERDAY;
1846 
1847 		if (mm == TM_FEBRUARY && isleap(year))
1848 			dd += SECSPERDAY;
1849 
1850 		tsec += dd;
1851 	}
1852 
1853 	tsec += (tm->tm_mday - 1) * SECSPERDAY;
1854 	tsec += tm->tm_sec;
1855 	tsec += tm->tm_min * SECSPERMIN;
1856 	tsec += tm->tm_hour * SECSPERHOUR;
1857 
1858 	tm->tm_isdst = 0;
1859 	(void) smb_gmtime_r(&tsec, tm);
1860 	return (tsec);
1861 }
1862 
1863 /*
1864  * smb_pad_align
1865  *
1866  * Returns the number of bytes required to pad an offset to the
1867  * specified alignment.
1868  */
1869 uint32_t
1870 smb_pad_align(uint32_t offset, uint32_t align)
1871 {
1872 	uint32_t pad = offset % align;
1873 
1874 	if (pad != 0)
1875 		pad = align - pad;
1876 
1877 	return (pad);
1878 }
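/*
 * Worked example (not part of the original source):
 *
 *	smb_pad_align(13, 8) == 3	(13 % 8 = 5; 8 - 5 = 3)
 *	13 + 3 == 16, the next 8-byte boundary
 *	smb_pad_align(16, 8) == 0	(already aligned)
 */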
1879 
1880 /*
1881  * smb_panic
1882  *
1883  * Logs the file name, function name and line number passed in and panics the
1884  * system.
1885  */
1886 void
1887 smb_panic(char *file, const char *func, int line)
1888 {
1889 	cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
1890 }
1891 
1892 /*
1893  * Creates an AVL tree and initializes the given smb_avl_t
1894  * structure using the passed args
1895  */
1896 void
1897 smb_avl_create(smb_avl_t *avl, size_t size, size_t offset, smb_avl_nops_t *ops)
1898 {
1899 	ASSERT(avl);
1900 	ASSERT(ops);
1901 
1902 	rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL);
1903 	mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL);
1904 
1905 	avl->avl_nops = ops;
1906 	avl->avl_state = SMB_AVL_STATE_READY;
1907 	avl->avl_refcnt = 0;
1908 	(void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence,
1909 	    sizeof (uint32_t));
1910 
1911 	avl_create(&avl->avl_tree, ops->avln_cmp, size, offset);
1912 }
1913 
1914 /*
1915  * Destroys the specified AVL tree.
1916  * It waits for all the in-flight operations to finish
1917  * before destroying the AVL.
1918  */
1919 void
1920 smb_avl_destroy(smb_avl_t *avl)
1921 {
1922 	void *cookie = NULL;
1923 	void *node;
1924 
1925 	ASSERT(avl);
1926 
1927 	mutex_enter(&avl->avl_mutex);
1928 	if (avl->avl_state != SMB_AVL_STATE_READY) {
1929 		mutex_exit(&avl->avl_mutex);
1930 		return;
1931 	}
1932 
1933 	avl->avl_state = SMB_AVL_STATE_DESTROYING;
1934 
1935 	while (avl->avl_refcnt > 0)
1936 		(void) cv_wait(&avl->avl_cv, &avl->avl_mutex);
1937 	mutex_exit(&avl->avl_mutex);
1938 
1939 	rw_enter(&avl->avl_lock, RW_WRITER);
1940 	while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL)
1941 		avl->avl_nops->avln_destroy(node);
1942 
1943 	avl_destroy(&avl->avl_tree);
1944 	rw_exit(&avl->avl_lock);
1945 
1946 	rw_destroy(&avl->avl_lock);
1947 
1948 	mutex_destroy(&avl->avl_mutex);
1949 	bzero(avl, sizeof (smb_avl_t));
1950 }
1951 
1952 /*
1953  * Adds the given item to the AVL if it's
1954  * not already there.
1955  *
1956  * Returns:
1957  *
1958  * 	ENOTACTIVE	AVL is not in READY state
1959  * 	EEXIST		The item is already in AVL
1960  */
1961 int
1962 smb_avl_add(smb_avl_t *avl, void *item)
1963 {
1964 	avl_index_t where;
1965 
1966 	ASSERT(avl);
1967 	ASSERT(item);
1968 
1969 	if (!smb_avl_hold(avl))
1970 		return (ENOTACTIVE);
1971 
1972 	rw_enter(&avl->avl_lock, RW_WRITER);
1973 	if (avl_find(&avl->avl_tree, item, &where) != NULL) {
1974 		rw_exit(&avl->avl_lock);
1975 		smb_avl_rele(avl);
1976 		return (EEXIST);
1977 	}
1978 
1979 	avl_insert(&avl->avl_tree, item, where);
1980 	avl->avl_sequence++;
1981 	rw_exit(&avl->avl_lock);
1982 
1983 	smb_avl_rele(avl);
1984 	return (0);
1985 }
1986 
1987 /*
1988  * Removes the given item from the AVL.
1989  * If no reference is left on the item
1990  * it will also be destroyed by calling the
1991  * registered destroy operation.
1992  */
1993 void
1994 smb_avl_remove(smb_avl_t *avl, void *item)
1995 {
1996 	avl_index_t where;
1997 	void *rm_item;
1998 
1999 	ASSERT(avl);
2000 	ASSERT(item);
2001 
2002 	if (!smb_avl_hold(avl))
2003 		return;
2004 
2005 	rw_enter(&avl->avl_lock, RW_WRITER);
2006 	if ((rm_item = avl_find(&avl->avl_tree, item, &where)) == NULL) {
2007 		rw_exit(&avl->avl_lock);
2008 		smb_avl_rele(avl);
2009 		return;
2010 	}
2011 
2012 	avl_remove(&avl->avl_tree, rm_item);
2013 	if (avl->avl_nops->avln_rele(rm_item))
2014 		avl->avl_nops->avln_destroy(rm_item);
2015 	avl->avl_sequence++;
2016 	rw_exit(&avl->avl_lock);
2017 
2018 	smb_avl_rele(avl);
2019 }
2020 
2021 /*
2022  * Looks up the AVL for the given item.
2023  * If the item is found a hold on the object
2024  * is taken before the pointer to it is
2025  * returned to the caller. The caller MUST
2026  * always call smb_avl_release() after it's done
2027  * using the returned object to release the hold
2028  * taken on the object.
2029  */
2030 void *
2031 smb_avl_lookup(smb_avl_t *avl, void *item)
2032 {
2033 	void *node = NULL;
2034 
2035 	ASSERT(avl);
2036 	ASSERT(item);
2037 
2038 	if (!smb_avl_hold(avl))
2039 		return (NULL);
2040 
2041 	rw_enter(&avl->avl_lock, RW_READER);
2042 	node = avl_find(&avl->avl_tree, item, NULL);
2043 	if (node != NULL)
2044 		avl->avl_nops->avln_hold(node);
2045 	rw_exit(&avl->avl_lock);
2046 
2047 	if (node == NULL)
2048 		smb_avl_rele(avl);
2049 
2050 	return (node);
2051 }
2052 
2053 /*
2054  * The hold on the given object is released.
2055  * This function MUST always be called after
2056  * smb_avl_lookup() and smb_avl_iterate() for
2057  * the returned object.
2058  *
2059  * If AVL is in DESTROYING state, the destroying
2060  * thread will be notified.
2061  */
2062 void
2063 smb_avl_release(smb_avl_t *avl, void *item)
2064 {
2065 	ASSERT(avl);
2066 	ASSERT(item);
2067 
2068 	if (avl->avl_nops->avln_rele(item))
2069 		avl->avl_nops->avln_destroy(item);
2070 
2071 	smb_avl_rele(avl);
2072 }
2073 
2074 /*
2075  * Initializes the given cursor for the AVL.
2076  * The cursor will be used to iterate through the AVL
2077  */
2078 void
2079 smb_avl_iterinit(smb_avl_t *avl, smb_avl_cursor_t *cursor)
2080 {
2081 	ASSERT(avl);
2082 	ASSERT(cursor);
2083 
2084 	cursor->avlc_next = NULL;
2085 	cursor->avlc_sequence = avl->avl_sequence;
2086 }
2087 
2088 /*
2089  * Iterates through the AVL using the given cursor.
2090  * It always starts at the beginning and then returns
2091  * a pointer to the next object on each subsequent call.
2092  *
2093  * If a new object is added to or removed from the AVL
2094  * between two calls to this function, the iteration
2095  * will terminate prematurely.
2096  *
2097  * The caller MUST always call smb_avl_release() after it's
2098  * done using the returned object to release the hold taken
2099  * on the object.
2100  */
2101 void *
2102 smb_avl_iterate(smb_avl_t *avl, smb_avl_cursor_t *cursor)
2103 {
2104 	void *node;
2105 
2106 	ASSERT(avl);
2107 	ASSERT(cursor);
2108 
2109 	if (!smb_avl_hold(avl))
2110 		return (NULL);
2111 
2112 	rw_enter(&avl->avl_lock, RW_READER);
2113 	if (cursor->avlc_sequence != avl->avl_sequence) {
2114 		rw_exit(&avl->avl_lock);
2115 		smb_avl_rele(avl);
2116 		return (NULL);
2117 	}
2118 
2119 	if (cursor->avlc_next == NULL)
2120 		node = avl_first(&avl->avl_tree);
2121 	else
2122 		node = AVL_NEXT(&avl->avl_tree, cursor->avlc_next);
2123 
2124 	if (node != NULL)
2125 		avl->avl_nops->avln_hold(node);
2126 
2127 	cursor->avlc_next = node;
2128 	rw_exit(&avl->avl_lock);
2129 
2130 	if (node == NULL)
2131 		smb_avl_rele(avl);
2132 
2133 	return (node);
2134 }
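/*
 * Illustrative sketch (not part of the original source): the iteration
 * pattern the comments above describe.  Every object handed back by
 * smb_avl_iterate() carries a hold that must be released.
 *
 *	smb_avl_cursor_t	cursor;
 *	void			*node;
 *
 *	smb_avl_iterinit(avl, &cursor);
 *	while ((node = smb_avl_iterate(avl, &cursor)) != NULL) {
 *		use node
 *		smb_avl_release(avl, node);
 *	}
 */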
2135 
2136 /*
2137  * Increments the AVL reference count in order to
2138  * prevent the avl from being destroyed while it's
2139  * being accessed.
2140  */
2141 static boolean_t
2142 smb_avl_hold(smb_avl_t *avl)
2143 {
2144 	mutex_enter(&avl->avl_mutex);
2145 	if (avl->avl_state != SMB_AVL_STATE_READY) {
2146 		mutex_exit(&avl->avl_mutex);
2147 		return (B_FALSE);
2148 	}
2149 	avl->avl_refcnt++;
2150 	mutex_exit(&avl->avl_mutex);
2151 
2152 	return (B_TRUE);
2153 }
2154 
2155 /*
2156  * Decrements the AVL reference count to release the
2157  * hold. If another thread is trying to destroy the
2158  * AVL and is waiting for the reference count to become
2159  * 0, it is signaled to wake up.
2160  */
2161 static void
2162 smb_avl_rele(smb_avl_t *avl)
2163 {
2164 	mutex_enter(&avl->avl_mutex);
2165 	ASSERT(avl->avl_refcnt > 0);
2166 	avl->avl_refcnt--;
2167 	if (avl->avl_state == SMB_AVL_STATE_DESTROYING)
2168 		cv_broadcast(&avl->avl_cv);
2169 	mutex_exit(&avl->avl_mutex);
2170 }
2171 
2172 /*
2173  * smb_latency_init
2174  */
2175 void
2176 smb_latency_init(smb_latency_t *lat)
2177 {
2178 	bzero(lat, sizeof (*lat));
2179 	mutex_init(&lat->ly_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
2180 }
2181 
2182 /*
2183  * smb_latency_destroy
2184  */
2185 void
2186 smb_latency_destroy(smb_latency_t *lat)
2187 {
2188 	mutex_destroy(&lat->ly_mutex);
2189 }
2190 
2191 /*
2192  * smb_latency_add_sample
2193  *
2194  * Uses the new sample to calculate the new mean and standard deviation. The
2195  * sample must be a scaled value.
2196  */
2197 void
2198 smb_latency_add_sample(smb_latency_t *lat, hrtime_t sample)
2199 {
2200 	hrtime_t	a_mean;
2201 	hrtime_t	d_mean;
2202 
2203 	mutex_enter(&lat->ly_mutex);
2204 	lat->ly_a_nreq++;
2205 	lat->ly_a_sum += sample;
2206 	if (lat->ly_a_nreq != 0) {
2207 		a_mean = lat->ly_a_sum / lat->ly_a_nreq;
2208 		lat->ly_a_stddev =
2209 		    (sample - a_mean) * (sample - lat->ly_a_mean);
2210 		lat->ly_a_mean = a_mean;
2211 	}
2212 	lat->ly_d_nreq++;
2213 	lat->ly_d_sum += sample;
2214 	if (lat->ly_d_nreq != 0) {
2215 		d_mean = lat->ly_d_sum / lat->ly_d_nreq;
2216 		lat->ly_d_stddev =
2217 		    (sample - d_mean) * (sample - lat->ly_d_mean);
2218 		lat->ly_d_mean = d_mean;
2219 	}
2220 	mutex_exit(&lat->ly_mutex);
2221 }
2222 
2223 /*
2224  * smb_srqueue_init
2225  */
2226 void
2227 smb_srqueue_init(smb_srqueue_t *srq)
2228 {
2229 	bzero(srq, sizeof (*srq));
2230 	mutex_init(&srq->srq_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
2231 	srq->srq_wlastupdate = srq->srq_rlastupdate = gethrtime_unscaled();
2232 }
2233 
2234 /*
2235  * smb_srqueue_destroy
2236  */
2237 void
2238 smb_srqueue_destroy(smb_srqueue_t *srq)
2239 {
2240 	mutex_destroy(&srq->srq_mutex);
2241 }
2242 
2243 /*
2244  * smb_srqueue_waitq_enter
2245  */
2246 void
2247 smb_srqueue_waitq_enter(smb_srqueue_t *srq)
2248 {
2249 	hrtime_t	new;
2250 	hrtime_t	delta;
2251 	uint32_t	wcnt;
2252 
2253 	mutex_enter(&srq->srq_mutex);
2254 	new = gethrtime_unscaled();
2255 	delta = new - srq->srq_wlastupdate;
2256 	srq->srq_wlastupdate = new;
2257 	wcnt = srq->srq_wcnt++;
2258 	if (wcnt != 0) {
2259 		srq->srq_wlentime += delta * wcnt;
2260 		srq->srq_wtime += delta;
2261 	}
2262 	mutex_exit(&srq->srq_mutex);
2263 }
2264 
2265 /*
2266  * smb_srqueue_runq_exit
2267  */
2268 void
2269 smb_srqueue_runq_exit(smb_srqueue_t *srq)
2270 {
2271 	hrtime_t	new;
2272 	hrtime_t	delta;
2273 	uint32_t	rcnt;
2274 
2275 	mutex_enter(&srq->srq_mutex);
2276 	new = gethrtime_unscaled();
2277 	delta = new - srq->srq_rlastupdate;
2278 	srq->srq_rlastupdate = new;
2279 	rcnt = srq->srq_rcnt--;
2280 	ASSERT(rcnt > 0);
2281 	srq->srq_rlentime += delta * rcnt;
2282 	srq->srq_rtime += delta;
2283 	mutex_exit(&srq->srq_mutex);
2284 }
2285 
2286 /*
2287  * smb_srqueue_waitq_to_runq
2288  */
2289 void
2290 smb_srqueue_waitq_to_runq(smb_srqueue_t *srq)
2291 {
2292 	hrtime_t	new;
2293 	hrtime_t	delta;
2294 	uint32_t	wcnt;
2295 	uint32_t	rcnt;
2296 
2297 	mutex_enter(&srq->srq_mutex);
2298 	new = gethrtime_unscaled();
2299 	delta = new - srq->srq_wlastupdate;
2300 	srq->srq_wlastupdate = new;
2301 	wcnt = srq->srq_wcnt--;
2302 	ASSERT(wcnt > 0);
2303 	srq->srq_wlentime += delta * wcnt;
2304 	srq->srq_wtime += delta;
2305 	delta = new - srq->srq_rlastupdate;
2306 	srq->srq_rlastupdate = new;
2307 	rcnt = srq->srq_rcnt++;
2308 	if (rcnt != 0) {
2309 		srq->srq_rlentime += delta * rcnt;
2310 		srq->srq_rtime += delta;
2311 	}
2312 	mutex_exit(&srq->srq_mutex);
2313 }
2314 
2315 /*
2316  * smb_srqueue_update
2317  *
2318  * Takes a snapshot of the smb_sr_stat_t structure passed in.
2319  */
2320 void
2321 smb_srqueue_update(smb_srqueue_t *srq, smb_kstat_utilization_t *kd)
2322 {
2323 	hrtime_t	delta;
2324 	hrtime_t	snaptime;
2325 
2326 	mutex_enter(&srq->srq_mutex);
2327 	snaptime = gethrtime_unscaled();
2328 	delta = snaptime - srq->srq_wlastupdate;
2329 	srq->srq_wlastupdate = snaptime;
2330 	if (srq->srq_wcnt != 0) {
2331 		srq->srq_wlentime += delta * srq->srq_wcnt;
2332 		srq->srq_wtime += delta;
2333 	}
2334 	delta = snaptime - srq->srq_rlastupdate;
2335 	srq->srq_rlastupdate = snaptime;
2336 	if (srq->srq_rcnt != 0) {
2337 		srq->srq_rlentime += delta * srq->srq_rcnt;
2338 		srq->srq_rtime += delta;
2339 	}
2340 	kd->ku_rlentime = srq->srq_rlentime;
2341 	kd->ku_rtime = srq->srq_rtime;
2342 	kd->ku_wlentime = srq->srq_wlentime;
2343 	kd->ku_wtime = srq->srq_wtime;
2344 	mutex_exit(&srq->srq_mutex);
2345 	scalehrtime(&kd->ku_rlentime);
2346 	scalehrtime(&kd->ku_rtime);
2347 	scalehrtime(&kd->ku_wlentime);
2348 	scalehrtime(&kd->ku_wtime);
2349 }
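/*
 * Illustrative sketch (not part of the original source): how a request's
 * passage through the server drives the wait/run queue accounting above,
 * mirroring the kstat I/O wait/run queue model.
 *
 *	smb_srqueue_waitq_enter(srq);		request queued
 *	...
 *	smb_srqueue_waitq_to_runq(srq);		worker picks it up
 *	...
 *	smb_srqueue_runq_exit(srq);		request completes
 *
 *	smb_kstat_utilization_t ku;
 *	smb_srqueue_update(srq, &ku);		scaled snapshot for kstats
 */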
2350