xref: /titanic_44/usr/src/uts/common/fs/smbsrv/smb_kutil.c (revision cf2fa554171e6c444f2d90f4a58aea8926c7ffae)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
25  */
26 
27 #include <sys/param.h>
28 #include <sys/types.h>
29 #include <sys/tzfile.h>
30 #include <sys/atomic.h>
31 #include <sys/kidmap.h>
32 #include <sys/time.h>
33 #include <sys/spl.h>
34 #include <sys/cpuvar.h>
35 #include <sys/random.h>
36 #include <smbsrv/smb_kproto.h>
37 #include <smbsrv/smb_fsops.h>
38 #include <smbsrv/smbinfo.h>
39 #include <smbsrv/smb_xdr.h>
40 #include <smbsrv/smb_vops.h>
41 #include <smbsrv/smb_idmap.h>
42 
43 #include <sys/sid.h>
44 #include <sys/priv_names.h>
45 
/* Object cache for smb_dtor_t entries posted to llist delete queues. */
static kmem_cache_t	*smb_dtor_cache = NULL;

static boolean_t smb_thread_continue_timedwait_locked(smb_thread_t *, int);

static boolean_t smb_avl_hold(smb_avl_t *);
static void smb_avl_rele(smb_avl_t *);

/*
 * Leap-second count.  Never updated here; presumably stays 0 because no
 * tzfile data is loaded in the kernel — TODO confirm.
 */
time_t tzh_leapcnt = 0;

struct tm
*smb_gmtime_r(time_t *clock, struct tm *result);

time_t
smb_timegm(struct tm *tm);

/*
 * Local definition of struct tm (the kernel does not provide <time.h>).
 * NOTE(review): field order mirrors the userland struct tm — confirm it
 * stays in sync if the userland definition ever changes.
 */
struct	tm {
	int	tm_sec;
	int	tm_min;
	int	tm_hour;
	int	tm_mday;
	int	tm_mon;
	int	tm_year;
	int	tm_wday;
	int	tm_yday;
	int	tm_isdst;
};

/* Days per month in a non-leap year; presumably used by smb_timegm(). */
static const int days_in_month[] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
76 
77 int
78 smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
79 {
80 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
81 		return (smb_wcequiv_strlen(str));
82 	return (strlen(str));
83 }
84 
85 int
86 smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
87 {
88 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
89 		return (smb_wcequiv_strlen(str) + 2);
90 	return (strlen(str) + 1);
91 }
92 
93 int
94 smb_ascii_or_unicode_null_len(struct smb_request *sr)
95 {
96 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
97 		return (2);
98 	return (1);
99 }
100 
/*
 * Convert old-style (DOS, LanMan) wildcard strings to NT style.
 * This should ONLY happen to patterns that come from old clients,
 * meaning dialect LANMAN2_1 etc. (dialect < NT_LM_0_12).
 *
 *	? is converted to >
 *	* is converted to < if it is followed by .
 *	. is converted to " if it is followed by ? or * or end of pattern
 *
 * Note: modifies pattern in place.
 */
void
smb_convert_wildcards(char *pattern)
{
	char	*cp;
	char	next;

	for (cp = pattern; *cp != '\0'; cp++) {
		next = cp[1];
		if (*cp == '?') {
			*cp = '>';
		} else if (*cp == '*' && next == '.') {
			*cp = '<';
		} else if (*cp == '.' &&
		    (next == '?' || next == '*' || next == '\0')) {
			*cp = '\"';
		}
	}
}
134 
135 /*
136  * smb_sattr_check
137  *
138  * Check file attributes against a search attribute (sattr) mask.
139  *
140  * Normal files, which includes READONLY and ARCHIVE, always pass
141  * this check.  If the DIRECTORY, HIDDEN or SYSTEM special attributes
142  * are set then they must appear in the search mask.  The special
143  * attributes are inclusive, i.e. all special attributes that appear
144  * in sattr must also appear in the file attributes for the check to
145  * pass.
146  *
147  * The following examples show how this works:
148  *
149  *		fileA:	READONLY
150  *		fileB:	0 (no attributes = normal file)
151  *		fileC:	READONLY, ARCHIVE
152  *		fileD:	HIDDEN
153  *		fileE:	READONLY, HIDDEN, SYSTEM
154  *		dirA:	DIRECTORY
155  *
156  * search attribute: 0
157  *		Returns: fileA, fileB and fileC.
158  * search attribute: HIDDEN
159  *		Returns: fileA, fileB, fileC and fileD.
160  * search attribute: SYSTEM
161  *		Returns: fileA, fileB and fileC.
162  * search attribute: DIRECTORY
163  *		Returns: fileA, fileB, fileC and dirA.
164  * search attribute: HIDDEN and SYSTEM
165  *		Returns: fileA, fileB, fileC, fileD and fileE.
166  *
167  * Returns true if the file and sattr match; otherwise, returns false.
168  */
169 boolean_t
170 smb_sattr_check(uint16_t dosattr, uint16_t sattr)
171 {
172 	if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
173 	    !(sattr & FILE_ATTRIBUTE_DIRECTORY))
174 		return (B_FALSE);
175 
176 	if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
177 	    !(sattr & FILE_ATTRIBUTE_HIDDEN))
178 		return (B_FALSE);
179 
180 	if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
181 	    !(sattr & FILE_ATTRIBUTE_SYSTEM))
182 		return (B_FALSE);
183 
184 	return (B_TRUE);
185 }
186 
/*
 * Coarse microtime(): seconds resolution only, tv_nsec is always 0.
 * Always returns 0 (success).
 */
int
microtime(timestruc_t *tvp)
{
	tvp->tv_sec = gethrestime_sec();
	tvp->tv_nsec = 0;
	return (0);
}
194 
/*
 * Return the system uptime in milliseconds (lbolt ticks converted to
 * msec).  NOTE(review): the value wraps an int32_t after roughly 24.8
 * days of uptime; callers presumably use it only for relative
 * comparisons — confirm.
 *
 * Fix: the definition used a K&R-style empty parameter list "()",
 * which declares an unprototyped function; use "(void)".
 */
int32_t
clock_get_milli_uptime(void)
{
	return (TICK_TO_MSEC(ddi_get_lbolt()));
}
200 
/*
 * smb_idpool_increment
 *
 * This function increments the ID pool by doubling the current size. This
 * function assumes the caller entered the mutex of the pool.
 *
 * The pool is a bitmap with one bit per ID, eight IDs per byte, so the
 * bitmap occupies id_size / 8 bytes.  Returns 0 on success; returns -1
 * if the pool is already at SMB_IDPOOL_MAX_SIZE or if the allocation
 * fails.
 */
static int
smb_idpool_increment(
    smb_idpool_t	*pool)
{
	uint8_t		*new_pool;
	uint32_t	new_size;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	new_size = pool->id_size * 2;
	if (new_size <= SMB_IDPOOL_MAX_SIZE) {
		/* KM_NOSLEEP: the caller holds the pool mutex. */
		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
		if (new_pool) {
			bzero(new_pool, new_size / 8);
			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
			kmem_free(pool->id_pool, pool->id_size / 8);
			pool->id_pool = new_pool;
			pool->id_free_counter += new_size - pool->id_size;
			pool->id_max_free_counter += new_size - pool->id_size;
			pool->id_size = new_size;
			pool->id_idx_msk = (new_size / 8) - 1;
			if (new_size >= SMB_IDPOOL_MAX_SIZE) {
				/* id -1 (0xFFFF) made unavailable */
				pool->id_pool[pool->id_idx_msk] = 0x80;
				pool->id_free_counter--;
				pool->id_max_free_counter--;
			}
			return (0);
		}
	}
	return (-1);
}
239 
/*
 * smb_idpool_constructor
 *
 * This function initializes the pool structure provided.
 *
 * The pool starts as a bitmap of SMB_IDPOOL_MIN_SIZE bits (one bit per
 * ID, eight per byte).  ID 0 is reserved at creation and the scan
 * position (id_idx/id_bit/id_bit_idx) starts at ID 1.  Always
 * returns 0.
 */
int
smb_idpool_constructor(
    smb_idpool_t	*pool)
{

	ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);

	pool->id_size = SMB_IDPOOL_MIN_SIZE;
	pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
	/* One ID (id 0) is reserved, hence the "- 1". */
	pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_bit = 0x02;
	pool->id_bit_idx = 1;
	pool->id_idx = 0;
	pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
	    KM_SLEEP);
	bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
	pool->id_magic = SMB_IDPOOL_MAGIC;
	return (0);
}
268 
/*
 * smb_idpool_destructor
 *
 * This function tears down and frees the resources associated with the
 * pool provided.  All allocated IDs must have been returned first; the
 * free-counter assertion enforces this in DEBUG builds.
 */
void
smb_idpool_destructor(
    smb_idpool_t	*pool)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
	/* Invalidate the magic so stale references trip the ASSERTs. */
	pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
	mutex_destroy(&pool->id_mutex);
	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
}
285 
/*
 * smb_idpool_alloc
 *
 * This function allocates an ID from the pool provided.
 *
 * The bitmap scan resumes from the position saved by the previous
 * allocation (id_idx/id_bit/id_bit_idx), so successive allocations
 * tend to hand out increasing IDs rather than immediately reusing a
 * just-freed one.  On success the ID is stored through *id and 0 is
 * returned; returns -1 if the pool is empty and cannot be grown.
 */
int
smb_idpool_alloc(
    smb_idpool_t	*pool,
    uint16_t		*id)
{
	uint32_t	i;
	uint8_t		bit;
	uint8_t		bit_idx;
	uint8_t		byte;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	mutex_enter(&pool->id_mutex);
	/* Grow the pool (doubling) if no IDs are left. */
	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
		mutex_exit(&pool->id_mutex);
		return (-1);
	}

	i = pool->id_size;
	while (i) {
		bit = pool->id_bit;
		bit_idx = pool->id_bit_idx;
		byte = pool->id_pool[pool->id_idx];
		while (bit) {
			if (byte & bit) {
				/* This ID is in use; try the next bit. */
				bit = bit << 1;
				bit_idx++;
				continue;
			}
			/* Free ID found: claim it and save the position. */
			pool->id_pool[pool->id_idx] |= bit;
			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
			pool->id_free_counter--;
			pool->id_bit = bit;
			pool->id_bit_idx = bit_idx;
			mutex_exit(&pool->id_mutex);
			return (0);
		}
		/* Byte exhausted; advance (with wrap) to the next byte. */
		pool->id_bit = 1;
		pool->id_bit_idx = 0;
		pool->id_idx++;
		pool->id_idx &= pool->id_idx_msk;
		--i;
	}
	/*
	 * This section of code shouldn't be reached. If there are IDs
	 * available and none could be found there's a problem.
	 */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
	return (-1);
}
342 
343 /*
344  * smb_idpool_free
345  *
346  * This function frees the ID provided.
347  */
348 void
349 smb_idpool_free(
350     smb_idpool_t	*pool,
351     uint16_t		id)
352 {
353 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
354 	ASSERT(id != 0);
355 	ASSERT(id != 0xFFFF);
356 
357 	mutex_enter(&pool->id_mutex);
358 	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
359 		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
360 		pool->id_free_counter++;
361 		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
362 		mutex_exit(&pool->id_mutex);
363 		return;
364 	}
365 	/* Freeing a free ID. */
366 	ASSERT(0);
367 	mutex_exit(&pool->id_mutex);
368 }
369 
370 /*
371  * Initialize the llist delete queue object cache.
372  */
373 void
374 smb_llist_init(void)
375 {
376 	if (smb_dtor_cache != NULL)
377 		return;
378 
379 	smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
380 	    sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
381 }
382 
383 /*
384  * Destroy the llist delete queue object cache.
385  */
386 void
387 smb_llist_fini(void)
388 {
389 	if (smb_dtor_cache != NULL) {
390 		kmem_cache_destroy(smb_dtor_cache);
391 		smb_dtor_cache = NULL;
392 	}
393 }
394 
395 /*
396  * smb_llist_constructor
397  *
398  * This function initializes a locked list.
399  */
400 void
401 smb_llist_constructor(
402     smb_llist_t	*ll,
403     size_t	size,
404     size_t	offset)
405 {
406 	rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
407 	mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
408 	list_create(&ll->ll_list, size, offset);
409 	list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
410 	    offsetof(smb_dtor_t, dt_lnd));
411 	ll->ll_count = 0;
412 	ll->ll_wrop = 0;
413 	ll->ll_deleteq_count = 0;
414 	ll->ll_flushing = B_FALSE;
415 }
416 
/*
 * Flush the delete queue and destroy a locked list.
 *
 * The flush runs first so any pending destructors execute before the
 * queue itself is torn down; both the list and the delete queue must
 * be empty afterwards (asserted in DEBUG builds).
 */
void
smb_llist_destructor(
    smb_llist_t	*ll)
{
	smb_llist_flush(ll);

	ASSERT(ll->ll_count == 0);
	ASSERT(ll->ll_deleteq_count == 0);

	rw_destroy(&ll->ll_lock);
	list_destroy(&ll->ll_list);
	list_destroy(&ll->ll_deleteq);
	mutex_destroy(&ll->ll_mutex);
}
434 
435 /*
436  * Post an object to the delete queue.  The delete queue will be processed
437  * during list exit or list destruction.  Objects are often posted for
438  * deletion during list iteration (while the list is locked) but that is
439  * not required, and an object can be posted at any time.
440  */
441 void
442 smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
443 {
444 	smb_dtor_t	*dtor;
445 
446 	ASSERT((object != NULL) && (dtorproc != NULL));
447 
448 	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
449 	bzero(dtor, sizeof (smb_dtor_t));
450 	dtor->dt_magic = SMB_DTOR_MAGIC;
451 	dtor->dt_object = object;
452 	dtor->dt_proc = dtorproc;
453 
454 	mutex_enter(&ll->ll_mutex);
455 	list_insert_tail(&ll->ll_deleteq, dtor);
456 	++ll->ll_deleteq_count;
457 	mutex_exit(&ll->ll_mutex);
458 }
459 
/*
 * Exit the list lock and process the delete queue.
 *
 * The lock is dropped first so the queued destructors run unlocked.
 */
void
smb_llist_exit(smb_llist_t *ll)
{
	rw_exit(&ll->ll_lock);
	smb_llist_flush(ll);
}
469 
/*
 * Flush the list delete queue.  The mutex is dropped across the destructor
 * call in case this leads to additional objects being posted to the delete
 * queue.
 *
 * The ll_flushing flag single-threads the flush: a concurrent caller
 * (including one re-entering through a destructor) returns immediately.
 */
void
smb_llist_flush(smb_llist_t *ll)
{
	smb_dtor_t    *dtor;

	mutex_enter(&ll->ll_mutex);
	if (ll->ll_flushing) {
		/* Another thread is already draining this queue. */
		mutex_exit(&ll->ll_mutex);
		return;
	}
	ll->ll_flushing = B_TRUE;

	dtor = list_head(&ll->ll_deleteq);
	while (dtor != NULL) {
		SMB_DTOR_VALID(dtor);
		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
		list_remove(&ll->ll_deleteq, dtor);
		--ll->ll_deleteq_count;
		mutex_exit(&ll->ll_mutex);

		/* Run the destructor unlocked; it may post new entries. */
		dtor->dt_proc(dtor->dt_object);

		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
		kmem_cache_free(smb_dtor_cache, dtor);
		mutex_enter(&ll->ll_mutex);
		dtor = list_head(&ll->ll_deleteq);
	}
	ll->ll_flushing = B_FALSE;

	mutex_exit(&ll->ll_mutex);
}
506 
507 /*
508  * smb_llist_upgrade
509  *
510  * This function tries to upgrade the lock of the locked list. It assumes the
511  * locked has already been entered in RW_READER mode. It first tries using the
512  * Solaris function rw_tryupgrade(). If that call fails the lock is released
513  * and reentered in RW_WRITER mode. In that last case a window is opened during
514  * which the contents of the list may have changed. The return code indicates
515  * whether or not the list was modified when the lock was exited.
516  */
517 int smb_llist_upgrade(
518     smb_llist_t *ll)
519 {
520 	uint64_t	wrop;
521 
522 	if (rw_tryupgrade(&ll->ll_lock) != 0) {
523 		return (0);
524 	}
525 	wrop = ll->ll_wrop;
526 	rw_exit(&ll->ll_lock);
527 	rw_enter(&ll->ll_lock, RW_WRITER);
528 	return (wrop != ll->ll_wrop);
529 }
530 
531 /*
532  * smb_llist_insert_head
533  *
534  * This function inserts the object passed a the beginning of the list. This
535  * function assumes the lock of the list has already been entered.
536  */
537 void
538 smb_llist_insert_head(
539     smb_llist_t	*ll,
540     void	*obj)
541 {
542 	list_insert_head(&ll->ll_list, obj);
543 	++ll->ll_wrop;
544 	++ll->ll_count;
545 }
546 
547 /*
548  * smb_llist_insert_tail
549  *
550  * This function appends to the object passed to the list. This function assumes
551  * the lock of the list has already been entered.
552  *
553  */
554 void
555 smb_llist_insert_tail(
556     smb_llist_t	*ll,
557     void	*obj)
558 {
559 	list_insert_tail(&ll->ll_list, obj);
560 	++ll->ll_wrop;
561 	++ll->ll_count;
562 }
563 
564 /*
565  * smb_llist_remove
566  *
567  * This function removes the object passed from the list. This function assumes
568  * the lock of the list has already been entered.
569  */
570 void
571 smb_llist_remove(
572     smb_llist_t	*ll,
573     void	*obj)
574 {
575 	list_remove(&ll->ll_list, obj);
576 	++ll->ll_wrop;
577 	--ll->ll_count;
578 }
579 
/*
 * smb_llist_get_count
 *
 * This function returns the number of elements in the specified list.
 * The count is read without taking the list lock, so it is only a
 * snapshot unless the caller holds the lock.
 */
uint32_t
smb_llist_get_count(
    smb_llist_t *ll)
{
	return (ll->ll_count);
}
591 
592 /*
593  * smb_slist_constructor
594  *
595  * Synchronized list constructor.
596  */
597 void
598 smb_slist_constructor(
599     smb_slist_t	*sl,
600     size_t	size,
601     size_t	offset)
602 {
603 	mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
604 	cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
605 	list_create(&sl->sl_list, size, offset);
606 	sl->sl_count = 0;
607 	sl->sl_waiting = B_FALSE;
608 }
609 
/*
 * smb_slist_destructor
 *
 * Synchronized list destructor.  The list must be empty; VERIFY (not
 * ASSERT) enforces this even in non-DEBUG builds.
 */
void
smb_slist_destructor(
    smb_slist_t	*sl)
{
	VERIFY(sl->sl_count == 0);

	mutex_destroy(&sl->sl_mutex);
	cv_destroy(&sl->sl_cv);
	list_destroy(&sl->sl_list);
}
625 
/*
 * smb_slist_insert_head
 *
 * This function inserts the object passed at the beginning of the list.
 * The list mutex is taken internally; callers need no external locking.
 */
void
smb_slist_insert_head(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_insert_head(&sl->sl_list, obj);
	++sl->sl_count;
	mutex_exit(&sl->sl_mutex);
}
641 
/*
 * smb_slist_insert_tail
 *
 * This function appends the object passed to the end of the list.
 * The list mutex is taken internally; callers need no external locking.
 */
void
smb_slist_insert_tail(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_insert_tail(&sl->sl_list, obj);
	++sl->sl_count;
	mutex_exit(&sl->sl_mutex);
}
657 
/*
 * smb_slist_remove
 *
 * This function removes the object passed by the caller from the list.
 * If the removal empties the list, any thread blocked in
 * smb_slist_wait_for_empty() is woken.
 */
void
smb_slist_remove(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_remove(&sl->sl_list, obj);
	if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}
676 
677 /*
678  * smb_slist_move_tail
679  *
680  * This function transfers all the contents of the synchronized list to the
681  * list_t provided. It returns the number of objects transferred.
682  */
683 uint32_t
684 smb_slist_move_tail(
685     list_t	*lst,
686     smb_slist_t	*sl)
687 {
688 	uint32_t	rv;
689 
690 	mutex_enter(&sl->sl_mutex);
691 	rv = sl->sl_count;
692 	if (sl->sl_count) {
693 		list_move_tail(lst, &sl->sl_list);
694 		sl->sl_count = 0;
695 		if (sl->sl_waiting) {
696 			sl->sl_waiting = B_FALSE;
697 			cv_broadcast(&sl->sl_cv);
698 		}
699 	}
700 	mutex_exit(&sl->sl_mutex);
701 	return (rv);
702 }
703 
/*
 * smb_slist_obj_move
 *
 * This function moves an object from one list to the end of the other
 * list.  It assumes the mutex of each list has been entered.  If the
 * move empties the source list, any thread waiting for it to drain is
 * woken.
 */
void
smb_slist_obj_move(
    smb_slist_t	*dst,
    smb_slist_t	*src,
    void	*obj)
{
	/* Both lists must host the same node type at the same offset. */
	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);

	list_remove(&src->sl_list, obj);
	list_insert_tail(&dst->sl_list, obj);
	dst->sl_count++;
	src->sl_count--;
	if ((src->sl_count == 0) && (src->sl_waiting)) {
		src->sl_waiting = B_FALSE;
		cv_broadcast(&src->sl_cv);
	}
}
728 
/*
 * smb_slist_wait_for_empty
 *
 * This function blocks until the list is empty.  The sl_waiting flag
 * tells removers to broadcast on sl_cv when the count reaches zero.
 */
void
smb_slist_wait_for_empty(
    smb_slist_t	*sl)
{
	mutex_enter(&sl->sl_mutex);
	while (sl->sl_count) {
		sl->sl_waiting = B_TRUE;
		cv_wait(&sl->sl_cv, &sl->sl_mutex);
	}
	mutex_exit(&sl->sl_mutex);
}
745 
/*
 * smb_slist_exit
 *
 * This function exits the mutex of the list and signals the condition
 * variable if the list is empty.  The caller must hold sl_mutex.
 */
void
smb_slist_exit(smb_slist_t *sl)
{
	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}
761 
/*
 * smb_thread_entry_point
 *
 * Common entry point for all the threads created through smb_thread_start.
 * The state of the thread is set to "running" at the beginning and moved to
 * "exiting" just before calling thread_exit(). The condition variable is
 * also signaled.
 */
static void
smb_thread_entry_point(
    smb_thread_t	*thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
	mutex_enter(&thread->sth_mtx);
	ASSERT(thread->sth_state == SMB_THREAD_STATE_STARTING);
	/* Record our identity so smb_thread_stop() can thread_join() us. */
	thread->sth_th = curthread;
	thread->sth_did = thread->sth_th->t_did;

	if (!thread->sth_kill) {
		/* Not already asked to die: run the real entry point. */
		thread->sth_state = SMB_THREAD_STATE_RUNNING;
		cv_signal(&thread->sth_cv);
		mutex_exit(&thread->sth_mtx);
		thread->sth_ep(thread, thread->sth_ep_arg);
		mutex_enter(&thread->sth_mtx);
	}
	thread->sth_th = NULL;
	thread->sth_state = SMB_THREAD_STATE_EXITING;
	cv_broadcast(&thread->sth_cv);
	mutex_exit(&thread->sth_mtx);
	zthread_exit();
}
793 
/*
 * smb_thread_init
 *
 * Initialize the thread control structure: name, entry point and its
 * argument, priority, and the mutex/cv pair used for state changes.
 * The thread itself is not created until smb_thread_start().
 *
 * NOTE(review): the magic ASSERT reads *thread before bzero(), i.e.
 * possibly uninitialized memory; it is a debug aid against double
 * initialization only — confirm callers pass known storage.
 */
void
smb_thread_init(
    smb_thread_t	*thread,
    char		*name,
    smb_thread_ep_t	ep,
    void		*ep_arg,
    pri_t		pri)
{
	ASSERT(thread->sth_magic != SMB_THREAD_MAGIC);

	bzero(thread, sizeof (*thread));

	(void) strlcpy(thread->sth_name, name, sizeof (thread->sth_name));
	thread->sth_ep = ep;
	thread->sth_ep_arg = ep_arg;
	thread->sth_state = SMB_THREAD_STATE_EXITED;
	thread->sth_pri = pri;
	mutex_init(&thread->sth_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&thread->sth_cv, NULL, CV_DEFAULT, NULL);
	thread->sth_magic = SMB_THREAD_MAGIC;
}
818 
/*
 * smb_thread_destroy
 *
 * Tear down a thread control structure.  The thread must already have
 * been stopped (state EXITED); the magic is cleared so stale uses trip
 * the ASSERTs elsewhere.
 */
void
smb_thread_destroy(
    smb_thread_t	*thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
	ASSERT(thread->sth_state == SMB_THREAD_STATE_EXITED);
	thread->sth_magic = 0;
	mutex_destroy(&thread->sth_mtx);
	cv_destroy(&thread->sth_cv);
}
832 
/*
 * smb_thread_start
 *
 * This function starts a thread with the parameters provided. It waits until
 * the state of the thread has been moved to running.
 *
 * Returns 0 on success, -1 if the thread was not in the EXITED state or
 * did not reach RUNNING (e.g. it was killed while starting).
 */
/*ARGSUSED*/
int
smb_thread_start(
    smb_thread_t	*thread)
{
	int		rc = 0;
	kthread_t	*tmpthread;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	switch (thread->sth_state) {
	case SMB_THREAD_STATE_EXITED:
		thread->sth_state = SMB_THREAD_STATE_STARTING;
		/* Drop the mutex across the (blocking) thread creation. */
		mutex_exit(&thread->sth_mtx);
		tmpthread = zthread_create(NULL, 0, smb_thread_entry_point,
		    thread, 0, thread->sth_pri);
		ASSERT(tmpthread != NULL);
		mutex_enter(&thread->sth_mtx);
		/* Wait for the new thread to leave STARTING. */
		while (thread->sth_state == SMB_THREAD_STATE_STARTING)
			cv_wait(&thread->sth_cv, &thread->sth_mtx);
		if (thread->sth_state != SMB_THREAD_STATE_RUNNING)
			rc = -1;
		break;
	default:
		ASSERT(0);
		rc = -1;
		break;
	}
	mutex_exit(&thread->sth_mtx);
	return (rc);
}
871 
/*
 * smb_thread_stop
 *
 * This function signals a thread to kill itself and waits until the "exiting"
 * state has been reached, then joins the thread and resets the control
 * structure to EXITED so the thread can be started again.  Safe to call
 * concurrently: a second caller waits for the first to finish the
 * teardown, and calling it on an already-exited thread is a no-op.
 */
void
smb_thread_stop(smb_thread_t *thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	switch (thread->sth_state) {
	case SMB_THREAD_STATE_RUNNING:
	case SMB_THREAD_STATE_STARTING:
		if (!thread->sth_kill) {
			/* First stopper: request the kill and reap. */
			thread->sth_kill = B_TRUE;
			cv_broadcast(&thread->sth_cv);
			while (thread->sth_state != SMB_THREAD_STATE_EXITING)
				cv_wait(&thread->sth_cv, &thread->sth_mtx);
			/* Drop the mutex: thread_join() may block. */
			mutex_exit(&thread->sth_mtx);
			thread_join(thread->sth_did);
			mutex_enter(&thread->sth_mtx);
			thread->sth_state = SMB_THREAD_STATE_EXITED;
			thread->sth_did = 0;
			thread->sth_kill = B_FALSE;
			cv_broadcast(&thread->sth_cv);
			break;
		}
		/*FALLTHRU*/

	case SMB_THREAD_STATE_EXITING:
		if (thread->sth_kill) {
			/* Another stopper is reaping; wait for it. */
			while (thread->sth_state != SMB_THREAD_STATE_EXITED)
				cv_wait(&thread->sth_cv, &thread->sth_mtx);
		} else {
			/* Thread exited on its own; just finalize state. */
			thread->sth_state = SMB_THREAD_STATE_EXITED;
			thread->sth_did = 0;
		}
		break;

	case SMB_THREAD_STATE_EXITED:
		break;

	default:
		ASSERT(0);
		break;
	}
	mutex_exit(&thread->sth_mtx);
}
922 
923 /*
924  * smb_thread_signal
925  *
926  * This function signals a thread.
927  */
928 void
929 smb_thread_signal(smb_thread_t *thread)
930 {
931 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
932 
933 	mutex_enter(&thread->sth_mtx);
934 	switch (thread->sth_state) {
935 	case SMB_THREAD_STATE_RUNNING:
936 		cv_signal(&thread->sth_cv);
937 		break;
938 
939 	default:
940 		break;
941 	}
942 	mutex_exit(&thread->sth_mtx);
943 }
944 
/*
 * Block until signaled (or killed), then report whether the thread
 * should keep running.  Returns B_FALSE once a kill was requested.
 */
boolean_t
smb_thread_continue(smb_thread_t *thread)
{
	boolean_t result;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	/* ticks == 0: wait indefinitely for a signal. */
	result = smb_thread_continue_timedwait_locked(thread, 0);
	mutex_exit(&thread->sth_mtx);

	return (result);
}
958 
/*
 * Non-blocking check of whether the thread should keep running.
 * Returns B_FALSE once a kill was requested.
 */
boolean_t
smb_thread_continue_nowait(smb_thread_t *thread)
{
	boolean_t result;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	/*
	 * Setting ticks=-1 requests a non-blocking check.  We will
	 * still block if the thread is in "suspend" state.
	 */
	result = smb_thread_continue_timedwait_locked(thread, -1);
	mutex_exit(&thread->sth_mtx);

	return (result);
}
976 
/*
 * Wait up to "seconds" for a signal, then report whether the thread
 * should keep running.  Returns B_FALSE once a kill was requested.
 */
boolean_t
smb_thread_continue_timedwait(smb_thread_t *thread, int seconds)
{
	boolean_t result;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	result = smb_thread_continue_timedwait_locked(thread,
	    SEC_TO_TICK(seconds));
	mutex_exit(&thread->sth_mtx);

	return (result);
}
991 
992 /*
993  * smb_thread_continue_timedwait_locked
994  *
995  * Internal only.  Ticks==-1 means don't block, Ticks == 0 means wait
996  * indefinitely
997  */
998 static boolean_t
999 smb_thread_continue_timedwait_locked(smb_thread_t *thread, int ticks)
1000 {
1001 	boolean_t	result;
1002 
1003 	/* -1 means don't block */
1004 	if (ticks != -1 && !thread->sth_kill) {
1005 		if (ticks == 0) {
1006 			cv_wait(&thread->sth_cv, &thread->sth_mtx);
1007 		} else {
1008 			(void) cv_reltimedwait(&thread->sth_cv,
1009 			    &thread->sth_mtx, (clock_t)ticks, TR_CLOCK_TICK);
1010 		}
1011 	}
1012 	result = (thread->sth_kill == 0);
1013 
1014 	return (result);
1015 }
1016 
1017 /*
1018  * smb_rwx_init
1019  */
1020 void
1021 smb_rwx_init(
1022     smb_rwx_t	*rwx)
1023 {
1024 	bzero(rwx, sizeof (smb_rwx_t));
1025 	cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
1026 	mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
1027 	rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
1028 }
1029 
/*
 * smb_rwx_destroy
 *
 * Release the resources of an extended reader/writer lock.  The lock
 * must not be held and no thread may be waiting on it.
 */
void
smb_rwx_destroy(
    smb_rwx_t	*rwx)
{
	mutex_destroy(&rwx->rwx_mutex);
	cv_destroy(&rwx->rwx_cv);
	rw_destroy(&rwx->rwx_lock);
}
1041 
/*
 * smb_rwx_rwexit
 *
 * Release the rwlock.  When releasing a write hold, wake any threads
 * blocked in smb_rwx_rwwait() first (readers cannot have changed the
 * protected state, so no wakeup is needed for them).
 */
void
smb_rwx_rwexit(
    smb_rwx_t	*rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mutex_enter(&rwx->rwx_mutex);
		if (rwx->rwx_waiting) {
			rwx->rwx_waiting = B_FALSE;
			cv_broadcast(&rwx->rwx_cv);
		}
		mutex_exit(&rwx->rwx_mutex);
	}
	rw_exit(&rwx->rwx_lock);
}
1060 
/*
 * smb_rwx_rwupgrade
 *
 * Ensure the lock is held as writer and return the mode it was held in
 * on entry (for a later smb_rwx_rwdowngrade).  If a reader-to-writer
 * upgrade cannot be done atomically, the lock is dropped and retaken
 * as writer, opening a window during which the protected state may
 * change.
 */
krw_t
smb_rwx_rwupgrade(
    smb_rwx_t	*rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		return (RW_WRITER);
	}
	if (!rw_tryupgrade(&rwx->rwx_lock)) {
		rw_exit(&rwx->rwx_lock);
		rw_enter(&rwx->rwx_lock, RW_WRITER);
	}
	return (RW_READER);
}
1078 
/*
 * smb_rwx_rwdowngrade
 *
 * Restore the lock to the mode returned by smb_rwx_rwupgrade().  If the
 * original mode was RW_READER, wake any smb_rwx_rwwait() waiters (the
 * write hold may have changed the protected state) and downgrade;
 * if it was RW_WRITER, the lock is left as is.
 */
void
smb_rwx_rwdowngrade(
    smb_rwx_t	*rwx,
    krw_t	mode)
{
	ASSERT(rw_write_held(&rwx->rwx_lock));
	ASSERT(rw_owner(&rwx->rwx_lock) == curthread);

	if (mode == RW_WRITER) {
		return;
	}
	ASSERT(mode == RW_READER);
	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		rwx->rwx_waiting = B_FALSE;
		cv_broadcast(&rwx->rwx_cv);
	}
	mutex_exit(&rwx->rwx_mutex);
	rw_downgrade(&rwx->rwx_lock);
}
1102 
1103 /*
1104  * smb_rwx_wait
1105  *
1106  * This function assumes the smb_rwx lock was enter in RW_READER or RW_WRITER
1107  * mode. It will:
1108  *
1109  *	1) release the lock and save its current mode.
1110  *	2) wait until the condition variable is signaled. This can happen for
1111  *	   2 reasons: When a writer releases the lock or when the time out (if
1112  *	   provided) expires.
1113  *	3) re-acquire the lock in the mode saved in (1).
1114  */
1115 int
1116 smb_rwx_rwwait(
1117     smb_rwx_t	*rwx,
1118     clock_t	timeout)
1119 {
1120 	int	rc;
1121 	krw_t	mode;
1122 
1123 	mutex_enter(&rwx->rwx_mutex);
1124 	rwx->rwx_waiting = B_TRUE;
1125 	mutex_exit(&rwx->rwx_mutex);
1126 
1127 	if (rw_write_held(&rwx->rwx_lock)) {
1128 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1129 		mode = RW_WRITER;
1130 	} else {
1131 		ASSERT(rw_read_held(&rwx->rwx_lock));
1132 		mode = RW_READER;
1133 	}
1134 	rw_exit(&rwx->rwx_lock);
1135 
1136 	mutex_enter(&rwx->rwx_mutex);
1137 	if (rwx->rwx_waiting) {
1138 		if (timeout == -1) {
1139 			rc = 1;
1140 			cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
1141 		} else {
1142 			rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
1143 			    timeout, TR_CLOCK_TICK);
1144 		}
1145 	}
1146 	mutex_exit(&rwx->rwx_mutex);
1147 
1148 	rw_enter(&rwx->rwx_lock, mode);
1149 	return (rc);
1150 }
1151 
1152 /*
1153  * SMB ID mapping
1154  *
1155  * Solaris ID mapping service (aka Winchester) works with domain SIDs
1156  * and RIDs where domain SIDs are in string format. CIFS service works
1157  * with binary SIDs understandable by CIFS clients. A layer of SMB ID
1158  * mapping functions are implemeted to hide the SID conversion details
1159  * and also hide the handling of array of batch mapping requests.
1160  *
1161  * IMPORTANT NOTE The Winchester API requires a zone. Because CIFS server
1162  * currently only runs in the global zone the global zone is specified.
1163  * This needs to be fixed when the CIFS server supports zones.
1164  */
1165 
1166 static int smb_idmap_batch_binsid(smb_idmap_batch_t *sib);
1167 
/*
 * smb_idmap_getid
 *
 * Maps the given Windows SID to a Solaris ID using the
 * simple mapping API.
 *
 * On entry *idtype selects the lookup (SMB_IDMAP_USER, SMB_IDMAP_GROUP
 * or SMB_IDMAP_UNKNOWN); on return it holds the resolved type.  The
 * mapped ID is stored through *id.  Returns the kidmap status, or
 * IDMAP_ERR_SID/IDMAP_ERR_ARG for a malformed SID or bad idtype.
 */
idmap_stat
smb_idmap_getid(smb_sid_t *sid, uid_t *id, int *idtype)
{
	smb_idmap_t sim;
	char sidstr[SMB_SID_STRSZ];

	/*
	 * smb_sid_splitstr presumably splits sidstr in place, leaving the
	 * domain part in sidstr and the RID in sim_rid — TODO confirm.
	 */
	smb_sid_tostr(sid, sidstr);
	if (smb_sid_splitstr(sidstr, &sim.sim_rid) != 0)
		return (IDMAP_ERR_SID);
	sim.sim_domsid = sidstr;
	sim.sim_id = id;

	switch (*idtype) {
	case SMB_IDMAP_USER:
		sim.sim_stat = kidmap_getuidbysid(global_zone, sim.sim_domsid,
		    sim.sim_rid, sim.sim_id);
		break;

	case SMB_IDMAP_GROUP:
		sim.sim_stat = kidmap_getgidbysid(global_zone, sim.sim_domsid,
		    sim.sim_rid, sim.sim_id);
		break;

	case SMB_IDMAP_UNKNOWN:
		sim.sim_stat = kidmap_getpidbysid(global_zone, sim.sim_domsid,
		    sim.sim_rid, sim.sim_id, &sim.sim_idtype);
		break;

	default:
		ASSERT(0);
		return (IDMAP_ERR_ARG);
	}

	*idtype = sim.sim_idtype;

	return (sim.sim_stat);
}
1211 
/*
 * smb_idmap_getsid
 *
 * Maps the given Solaris ID to a Windows SID using the
 * simple mapping API.
 *
 * On success *sid points to a newly allocated binary SID that the
 * caller must free with smb_sid_free().  SMB_IDMAP_EVERYONE maps
 * statically to Everyone (S-1-1-0) without consulting kidmap.
 */
idmap_stat
smb_idmap_getsid(uid_t id, int idtype, smb_sid_t **sid)
{
	smb_idmap_t sim;

	switch (idtype) {
	case SMB_IDMAP_USER:
		sim.sim_stat = kidmap_getsidbyuid(global_zone, id,
		    (const char **)&sim.sim_domsid, &sim.sim_rid);
		break;

	case SMB_IDMAP_GROUP:
		sim.sim_stat = kidmap_getsidbygid(global_zone, id,
		    (const char **)&sim.sim_domsid, &sim.sim_rid);
		break;

	case SMB_IDMAP_EVERYONE:
		/* Everyone S-1-1-0 */
		sim.sim_domsid = "S-1-1";
		sim.sim_rid = 0;
		sim.sim_stat = IDMAP_SUCCESS;
		break;

	default:
		ASSERT(0);
		return (IDMAP_ERR_ARG);
	}

	if (sim.sim_stat != IDMAP_SUCCESS)
		return (sim.sim_stat);

	/* kidmap may report success without a domain SID; treat as unmapped. */
	if (sim.sim_domsid == NULL)
		return (IDMAP_ERR_NOMAPPING);

	/* Convert the string domain SID and append the RID. */
	sim.sim_sid = smb_sid_fromstr(sim.sim_domsid);
	if (sim.sim_sid == NULL)
		return (IDMAP_ERR_INTERNAL);

	*sid = smb_sid_splice(sim.sim_sid, sim.sim_rid);
	smb_sid_free(sim.sim_sid);
	if (*sid == NULL)
		sim.sim_stat = IDMAP_ERR_INTERNAL;

	return (sim.sim_stat);
}
1263 
/*
 * smb_idmap_batch_create
 *
 * Creates and initializes the context for batch ID mapping.
 *
 * sib		caller-supplied batch context, zeroed and filled in here
 * nmap		number of mapping slots to allocate
 * flags	SMB_IDMAP_ID2SID or SMB_IDMAP_SID2ID; recorded so that
 *		smb_idmap_batch_destroy() knows what to free
 *
 * Always returns IDMAP_SUCCESS: both allocations below use
 * KM_SLEEP and therefore cannot fail.
 */
idmap_stat
smb_idmap_batch_create(smb_idmap_batch_t *sib, uint16_t nmap, int flags)
{
	ASSERT(sib);

	bzero(sib, sizeof (smb_idmap_batch_t));

	sib->sib_idmaph = kidmap_get_create(global_zone);

	sib->sib_flags = flags;
	sib->sib_nmap = nmap;
	sib->sib_size = nmap * sizeof (smb_idmap_t);
	sib->sib_maps = kmem_zalloc(sib->sib_size, KM_SLEEP);

	return (IDMAP_SUCCESS);
}
1285 
1286 /*
1287  * smb_idmap_batch_destroy
1288  *
1289  * Frees the batch ID mapping context.
1290  * If ID mapping is Solaris -> Windows it frees memories
1291  * allocated for binary SIDs.
1292  */
1293 void
1294 smb_idmap_batch_destroy(smb_idmap_batch_t *sib)
1295 {
1296 	char *domsid;
1297 	int i;
1298 
1299 	ASSERT(sib);
1300 	ASSERT(sib->sib_maps);
1301 
1302 	if (sib->sib_idmaph)
1303 		kidmap_get_destroy(sib->sib_idmaph);
1304 
1305 	if (sib->sib_flags & SMB_IDMAP_ID2SID) {
1306 		/*
1307 		 * SIDs are allocated only when mapping
1308 		 * UID/GID to SIDs
1309 		 */
1310 		for (i = 0; i < sib->sib_nmap; i++)
1311 			smb_sid_free(sib->sib_maps[i].sim_sid);
1312 	} else if (sib->sib_flags & SMB_IDMAP_SID2ID) {
1313 		/*
1314 		 * SID prefixes are allocated only when mapping
1315 		 * SIDs to UID/GID
1316 		 */
1317 		for (i = 0; i < sib->sib_nmap; i++) {
1318 			domsid = sib->sib_maps[i].sim_domsid;
1319 			if (domsid)
1320 				smb_mem_free(domsid);
1321 		}
1322 	}
1323 
1324 	if (sib->sib_size && sib->sib_maps)
1325 		kmem_free(sib->sib_maps, sib->sib_size);
1326 }
1327 
1328 /*
1329  * smb_idmap_batch_getid
1330  *
1331  * Queue a request to map the given SID to a UID or GID.
1332  *
1333  * sim->sim_id should point to variable that's supposed to
1334  * hold the returned UID/GID. This needs to be setup by caller
1335  * of this function.
1336  *
1337  * If requested ID type is known, it's passed as 'idtype',
1338  * if it's unknown it'll be returned in sim->sim_idtype.
1339  */
1340 idmap_stat
1341 smb_idmap_batch_getid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
1342     smb_sid_t *sid, int idtype)
1343 {
1344 	char strsid[SMB_SID_STRSZ];
1345 	idmap_stat idm_stat;
1346 
1347 	ASSERT(idmaph);
1348 	ASSERT(sim);
1349 	ASSERT(sid);
1350 
1351 	smb_sid_tostr(sid, strsid);
1352 	if (smb_sid_splitstr(strsid, &sim->sim_rid) != 0)
1353 		return (IDMAP_ERR_SID);
1354 	sim->sim_domsid = smb_mem_strdup(strsid);
1355 
1356 	switch (idtype) {
1357 	case SMB_IDMAP_USER:
1358 		idm_stat = kidmap_batch_getuidbysid(idmaph, sim->sim_domsid,
1359 		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
1360 		break;
1361 
1362 	case SMB_IDMAP_GROUP:
1363 		idm_stat = kidmap_batch_getgidbysid(idmaph, sim->sim_domsid,
1364 		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
1365 		break;
1366 
1367 	case SMB_IDMAP_UNKNOWN:
1368 		idm_stat = kidmap_batch_getpidbysid(idmaph, sim->sim_domsid,
1369 		    sim->sim_rid, sim->sim_id, &sim->sim_idtype,
1370 		    &sim->sim_stat);
1371 		break;
1372 
1373 	default:
1374 		ASSERT(0);
1375 		return (IDMAP_ERR_ARG);
1376 	}
1377 
1378 	return (idm_stat);
1379 }
1380 
1381 /*
1382  * smb_idmap_batch_getsid
1383  *
1384  * Queue a request to map the given UID/GID to a SID.
1385  *
1386  * sim->sim_domsid and sim->sim_rid will contain the mapping
1387  * result upon successful process of the batched request.
1388  */
1389 idmap_stat
1390 smb_idmap_batch_getsid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
1391     uid_t id, int idtype)
1392 {
1393 	idmap_stat idm_stat;
1394 
1395 	switch (idtype) {
1396 	case SMB_IDMAP_USER:
1397 		idm_stat = kidmap_batch_getsidbyuid(idmaph, id,
1398 		    (const char **)&sim->sim_domsid, &sim->sim_rid,
1399 		    &sim->sim_stat);
1400 		break;
1401 
1402 	case SMB_IDMAP_GROUP:
1403 		idm_stat = kidmap_batch_getsidbygid(idmaph, id,
1404 		    (const char **)&sim->sim_domsid, &sim->sim_rid,
1405 		    &sim->sim_stat);
1406 		break;
1407 
1408 	case SMB_IDMAP_OWNERAT:
1409 		/* Current Owner S-1-5-32-766 */
1410 		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
1411 		sim->sim_rid = SECURITY_CURRENT_OWNER_RID;
1412 		sim->sim_stat = IDMAP_SUCCESS;
1413 		idm_stat = IDMAP_SUCCESS;
1414 		break;
1415 
1416 	case SMB_IDMAP_GROUPAT:
1417 		/* Current Group S-1-5-32-767 */
1418 		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
1419 		sim->sim_rid = SECURITY_CURRENT_GROUP_RID;
1420 		sim->sim_stat = IDMAP_SUCCESS;
1421 		idm_stat = IDMAP_SUCCESS;
1422 		break;
1423 
1424 	case SMB_IDMAP_EVERYONE:
1425 		/* Everyone S-1-1-0 */
1426 		sim->sim_domsid = NT_WORLD_AUTH_SIDSTR;
1427 		sim->sim_rid = 0;
1428 		sim->sim_stat = IDMAP_SUCCESS;
1429 		idm_stat = IDMAP_SUCCESS;
1430 		break;
1431 
1432 	default:
1433 		ASSERT(0);
1434 		return (IDMAP_ERR_ARG);
1435 	}
1436 
1437 	return (idm_stat);
1438 }
1439 
1440 /*
1441  * smb_idmap_batch_binsid
1442  *
1443  * Convert sidrids to binary sids
1444  *
1445  * Returns 0 if successful and non-zero upon failure.
1446  */
1447 static int
1448 smb_idmap_batch_binsid(smb_idmap_batch_t *sib)
1449 {
1450 	smb_sid_t *sid;
1451 	smb_idmap_t *sim;
1452 	int i;
1453 
1454 	if (sib->sib_flags & SMB_IDMAP_SID2ID)
1455 		/* This operation is not required */
1456 		return (0);
1457 
1458 	sim = sib->sib_maps;
1459 	for (i = 0; i < sib->sib_nmap; sim++, i++) {
1460 		ASSERT(sim->sim_domsid);
1461 		if (sim->sim_domsid == NULL)
1462 			return (1);
1463 
1464 		if ((sid = smb_sid_fromstr(sim->sim_domsid)) == NULL)
1465 			return (1);
1466 
1467 		sim->sim_sid = smb_sid_splice(sid, sim->sim_rid);
1468 		smb_sid_free(sid);
1469 	}
1470 
1471 	return (0);
1472 }
1473 
1474 /*
1475  * smb_idmap_batch_getmappings
1476  *
1477  * trigger ID mapping service to get the mappings for queued
1478  * requests.
1479  *
1480  * Checks the result of all the queued requests.
1481  * If this is a Solaris -> Windows mapping it generates
1482  * binary SIDs from returned (domsid, rid) pairs.
1483  */
1484 idmap_stat
1485 smb_idmap_batch_getmappings(smb_idmap_batch_t *sib)
1486 {
1487 	idmap_stat idm_stat = IDMAP_SUCCESS;
1488 	int i;
1489 
1490 	idm_stat = kidmap_get_mappings(sib->sib_idmaph);
1491 	if (idm_stat != IDMAP_SUCCESS)
1492 		return (idm_stat);
1493 
1494 	/*
1495 	 * Check the status for all the queued requests
1496 	 */
1497 	for (i = 0; i < sib->sib_nmap; i++) {
1498 		if (sib->sib_maps[i].sim_stat != IDMAP_SUCCESS)
1499 			return (sib->sib_maps[i].sim_stat);
1500 	}
1501 
1502 	if (smb_idmap_batch_binsid(sib) != 0)
1503 		idm_stat = IDMAP_ERR_OTHER;
1504 
1505 	return (idm_stat);
1506 }
1507 
1508 uint64_t
1509 smb_time_unix_to_nt(timestruc_t *unix_time)
1510 {
1511 	uint64_t nt_time;
1512 
1513 	if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
1514 		return (0);
1515 
1516 	nt_time = unix_time->tv_sec;
1517 	nt_time *= 10000000;  /* seconds to 100ns */
1518 	nt_time += unix_time->tv_nsec / 100;
1519 	return (nt_time + NT_TIME_BIAS);
1520 }
1521 
1522 void
1523 smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
1524 {
1525 	uint32_t seconds;
1526 
1527 	ASSERT(unix_time);
1528 
1529 	if ((nt_time == 0) || (nt_time == -1)) {
1530 		unix_time->tv_sec = 0;
1531 		unix_time->tv_nsec = 0;
1532 		return;
1533 	}
1534 
1535 	nt_time -= NT_TIME_BIAS;
1536 	seconds = nt_time / 10000000;
1537 	unix_time->tv_sec = seconds;
1538 	unix_time->tv_nsec = (nt_time  % 10000000) * 100;
1539 }
1540 
1541 /*
1542  * smb_time_gmt_to_local, smb_time_local_to_gmt
1543  *
1544  * Apply the gmt offset to convert between local time and gmt
1545  */
1546 int32_t
1547 smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt)
1548 {
1549 	if ((gmt == 0) || (gmt == -1))
1550 		return (0);
1551 
1552 	return (gmt - sr->sr_gmtoff);
1553 }
1554 
1555 int32_t
1556 smb_time_local_to_gmt(smb_request_t *sr, int32_t local)
1557 {
1558 	if ((local == 0) || (local == -1))
1559 		return (0);
1560 
1561 	return (local + sr->sr_gmtoff);
1562 }
1563 
1564 
1565 /*
1566  * smb_time_dos_to_unix
1567  *
1568  * Convert SMB_DATE & SMB_TIME values to a unix timestamp.
1569  *
1570  * A date/time field of 0 means that that server file system
1571  * assigned value need not be changed. The behaviour when the
1572  * date/time field is set to -1 is not documented but is
1573  * generally treated like 0.
1574  * If date or time is 0 or -1 the unix time is returned as 0
1575  * so that the caller can identify and handle this special case.
1576  */
1577 int32_t
1578 smb_time_dos_to_unix(int16_t date, int16_t time)
1579 {
1580 	struct tm	atm;
1581 
1582 	if (((date == 0) || (time == 0)) ||
1583 	    ((date == -1) || (time == -1))) {
1584 		return (0);
1585 	}
1586 
1587 	atm.tm_year = ((date >>  9) & 0x3F) + 80;
1588 	atm.tm_mon  = ((date >>  5) & 0x0F) - 1;
1589 	atm.tm_mday = ((date >>  0) & 0x1F);
1590 	atm.tm_hour = ((time >> 11) & 0x1F);
1591 	atm.tm_min  = ((time >>  5) & 0x3F);
1592 	atm.tm_sec  = ((time >>  0) & 0x1F) << 1;
1593 
1594 	return (smb_timegm(&atm));
1595 }
1596 
1597 void
1598 smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p)
1599 {
1600 	struct tm	atm;
1601 	int		i;
1602 	time_t		tmp_time;
1603 
1604 	if (ux_time == 0) {
1605 		*date_p = 0;
1606 		*time_p = 0;
1607 		return;
1608 	}
1609 
1610 	tmp_time = (time_t)ux_time;
1611 	(void) smb_gmtime_r(&tmp_time, &atm);
1612 
1613 	if (date_p) {
1614 		i = 0;
1615 		i += atm.tm_year - 80;
1616 		i <<= 4;
1617 		i += atm.tm_mon + 1;
1618 		i <<= 5;
1619 		i += atm.tm_mday;
1620 
1621 		*date_p = (short)i;
1622 	}
1623 	if (time_p) {
1624 		i = 0;
1625 		i += atm.tm_hour;
1626 		i <<= 6;
1627 		i += atm.tm_min;
1628 		i <<= 5;
1629 		i += atm.tm_sec >> 1;
1630 
1631 		*time_p = (short)i;
1632 	}
1633 }
1634 
1635 
/*
 * smb_gmtime_r
 *
 * Thread-safe version of smb_gmtime. Returns a null pointer if either
 * input parameter is a null pointer. Otherwise returns a pointer
 * to result.
 *
 * Day of the week calculation: the Epoch was a thursday.
 *
 * There are no timezone corrections so tm_isdst and tm_gmtoff are
 * always zero, and the zone is always WET.
 */
struct tm *
smb_gmtime_r(time_t *clock, struct tm *result)
{
	time_t tsec;
	int year;
	int month;
	int sec_per_month;

	if (clock == 0 || result == 0)
		return (0);

	bzero(result, sizeof (struct tm));
	tsec = *clock;
	/* Remove accumulated leap seconds (see smb_timegm, which adds them). */
	tsec -= tzh_leapcnt;

	/* Day of week: days since Epoch, offset by Thursday (day 0). */
	result->tm_wday = tsec / SECSPERDAY;
	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;

	/* Subtract whole years, leaving the seconds into the current year. */
	year = EPOCH_YEAR;
	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
	    (SECSPERDAY * DAYSPERNYEAR))) {
		if (isleap(year))
			tsec -= SECSPERDAY * DAYSPERLYEAR;
		else
			tsec -= SECSPERDAY * DAYSPERNYEAR;

		++year;
	}

	result->tm_year = year - TM_YEAR_BASE;
	result->tm_yday = tsec / SECSPERDAY;

	/* Subtract whole months, accounting for a leap-year February. */
	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
		sec_per_month = days_in_month[month] * SECSPERDAY;

		if (month == TM_FEBRUARY && isleap(year))
			sec_per_month += SECSPERDAY;

		if (tsec < sec_per_month)
			break;

		tsec -= sec_per_month;
	}

	/* Remaining seconds give day-of-month, then h:m:s. */
	result->tm_mon = month;
	result->tm_mday = (tsec / SECSPERDAY) + 1;
	tsec %= SECSPERDAY;
	result->tm_sec = tsec % 60;
	tsec /= 60;
	result->tm_min = tsec % 60;
	tsec /= 60;
	result->tm_hour = (int)tsec;

	return (result);
}
1703 
1704 
/*
 * smb_timegm
 *
 * Converts the broken-down time in tm to a time value, i.e. the number
 * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
 * not a POSIX or ANSI function. Per the man page, the input values of
 * tm_wday and tm_yday are ignored and, as the input data is assumed to
 * represent GMT, we force tm_isdst and tm_gmtoff to 0.
 *
 * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
 * and tm_yday, and bring the other fields within normal range. I don't
 * think this is really how it should be done but it's convenient for
 * now.
 *
 * Returns -1 if tm is a null pointer.
 */
time_t
smb_timegm(struct tm *tm)
{
	time_t tsec;
	int dd;
	int mm;
	int yy;
	int year;

	if (tm == 0)
		return (-1);

	year = tm->tm_year + TM_YEAR_BASE;
	/* Start with the leap-second count; smb_gmtime_r subtracts it. */
	tsec = tzh_leapcnt;

	/* Add the seconds for every whole year since the Epoch. */
	for (yy = EPOCH_YEAR; yy < year; ++yy) {
		if (isleap(yy))
			tsec += SECSPERDAY * DAYSPERLYEAR;
		else
			tsec += SECSPERDAY * DAYSPERNYEAR;
	}

	/* Add the seconds for every whole month of the current year. */
	for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
		dd = days_in_month[mm] * SECSPERDAY;

		if (mm == TM_FEBRUARY && isleap(year))
			dd += SECSPERDAY;

		tsec += dd;
	}

	/* Add the day-of-month and the time-of-day components. */
	tsec += (tm->tm_mday - 1) * SECSPERDAY;
	tsec += tm->tm_sec;
	tsec += tm->tm_min * SECSPERMIN;
	tsec += tm->tm_hour * SECSPERHOUR;

	tm->tm_isdst = 0;
	/* Normalize tm (fills in tm_wday/tm_yday as a side effect). */
	(void) smb_gmtime_r(&tsec, tm);
	return (tsec);
}
1759 
/*
 * smb_pad_align
 *
 * Compute the number of padding bytes required to advance 'offset'
 * to the next multiple of 'align'.  Returns zero when the offset
 * is already aligned.
 */
uint32_t
smb_pad_align(uint32_t offset, uint32_t align)
{
	uint32_t remainder = offset % align;

	return ((remainder == 0) ? 0 : align - remainder);
}
1776 
/*
 * smb_panic
 *
 * Logs the file name, function name and line number passed in and panics the
 * system.  Does not return.
 */
void
smb_panic(char *file, const char *func, int line)
{
	/* CE_PANIC both logs the message and halts the system. */
	cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
}
1788 
/*
 * Creates an AVL tree and initializes the given smb_avl_t
 * structure using the passed args.
 *
 * avl		structure to initialize
 * size		size of the objects stored in the tree
 * offset	offset of the avl_node_t link within each object
 * ops		node operations (compare, hold, rele, destroy);
 *		kept by reference, so must outlive the AVL
 */
void
smb_avl_create(smb_avl_t *avl, size_t size, size_t offset,
	const smb_avl_nops_t *ops)
{
	ASSERT(avl);
	ASSERT(ops);

	rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL);

	avl->avl_nops = ops;
	avl->avl_state = SMB_AVL_STATE_READY;
	avl->avl_refcnt = 0;
	/*
	 * Start the sequence number at a random value; iterators use it
	 * to detect tree modification between smb_avl_iterate() calls.
	 */
	(void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence,
	    sizeof (uint32_t));

	avl_create(&avl->avl_tree, ops->avln_cmp, size, offset);
}
1811 
/*
 * Destroys the specified AVL tree.
 * It waits for all the in-flight operations to finish
 * before destroying the AVL.
 */
void
smb_avl_destroy(smb_avl_t *avl)
{
	void *cookie = NULL;
	void *node;

	ASSERT(avl);

	/* Move to DESTROYING so smb_avl_hold() refuses new holds. */
	mutex_enter(&avl->avl_mutex);
	if (avl->avl_state != SMB_AVL_STATE_READY) {
		mutex_exit(&avl->avl_mutex);
		return;
	}

	avl->avl_state = SMB_AVL_STATE_DESTROYING;

	/* Wait for in-flight holders; smb_avl_rele() broadcasts avl_cv. */
	while (avl->avl_refcnt > 0)
		(void) cv_wait(&avl->avl_cv, &avl->avl_mutex);
	mutex_exit(&avl->avl_mutex);

	/* No holders remain; tear down every node, then the tree. */
	rw_enter(&avl->avl_lock, RW_WRITER);
	while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL)
		avl->avl_nops->avln_destroy(node);

	avl_destroy(&avl->avl_tree);
	rw_exit(&avl->avl_lock);

	rw_destroy(&avl->avl_lock);

	mutex_destroy(&avl->avl_mutex);
	bzero(avl, sizeof (smb_avl_t));
}
1849 
1850 /*
1851  * Adds the given item to the AVL if it's
1852  * not already there.
1853  *
1854  * Returns:
1855  *
1856  * 	ENOTACTIVE	AVL is not in READY state
1857  * 	EEXIST		The item is already in AVL
1858  */
1859 int
1860 smb_avl_add(smb_avl_t *avl, void *item)
1861 {
1862 	avl_index_t where;
1863 
1864 	ASSERT(avl);
1865 	ASSERT(item);
1866 
1867 	if (!smb_avl_hold(avl))
1868 		return (ENOTACTIVE);
1869 
1870 	rw_enter(&avl->avl_lock, RW_WRITER);
1871 	if (avl_find(&avl->avl_tree, item, &where) != NULL) {
1872 		rw_exit(&avl->avl_lock);
1873 		smb_avl_rele(avl);
1874 		return (EEXIST);
1875 	}
1876 
1877 	avl_insert(&avl->avl_tree, item, where);
1878 	avl->avl_sequence++;
1879 	rw_exit(&avl->avl_lock);
1880 
1881 	smb_avl_rele(avl);
1882 	return (0);
1883 }
1884 
1885 /*
1886  * Removes the given item from the AVL.
1887  * If no reference is left on the item
1888  * it will also be destroyed by calling the
1889  * registered destroy operation.
1890  */
1891 void
1892 smb_avl_remove(smb_avl_t *avl, void *item)
1893 {
1894 	avl_index_t where;
1895 	void *rm_item;
1896 
1897 	ASSERT(avl);
1898 	ASSERT(item);
1899 
1900 	if (!smb_avl_hold(avl))
1901 		return;
1902 
1903 	rw_enter(&avl->avl_lock, RW_WRITER);
1904 	if ((rm_item = avl_find(&avl->avl_tree, item, &where)) == NULL) {
1905 		rw_exit(&avl->avl_lock);
1906 		smb_avl_rele(avl);
1907 		return;
1908 	}
1909 
1910 	avl_remove(&avl->avl_tree, rm_item);
1911 	if (avl->avl_nops->avln_rele(rm_item))
1912 		avl->avl_nops->avln_destroy(rm_item);
1913 	avl->avl_sequence++;
1914 	rw_exit(&avl->avl_lock);
1915 
1916 	smb_avl_rele(avl);
1917 }
1918 
1919 /*
1920  * Looks up the AVL for the given item.
1921  * If the item is found a hold on the object
1922  * is taken before the pointer to it is
1923  * returned to the caller. The caller MUST
1924  * always call smb_avl_release() after it's done
1925  * using the returned object to release the hold
1926  * taken on the object.
1927  */
1928 void *
1929 smb_avl_lookup(smb_avl_t *avl, void *item)
1930 {
1931 	void *node = NULL;
1932 
1933 	ASSERT(avl);
1934 	ASSERT(item);
1935 
1936 	if (!smb_avl_hold(avl))
1937 		return (NULL);
1938 
1939 	rw_enter(&avl->avl_lock, RW_READER);
1940 	node = avl_find(&avl->avl_tree, item, NULL);
1941 	if (node != NULL)
1942 		avl->avl_nops->avln_hold(node);
1943 	rw_exit(&avl->avl_lock);
1944 
1945 	if (node == NULL)
1946 		smb_avl_rele(avl);
1947 
1948 	return (node);
1949 }
1950 
1951 /*
1952  * The hold on the given object is released.
1953  * This function MUST always be called after
1954  * smb_avl_lookup() and smb_avl_iterate() for
1955  * the returned object.
1956  *
1957  * If AVL is in DESTROYING state, the destroying
1958  * thread will be notified.
1959  */
1960 void
1961 smb_avl_release(smb_avl_t *avl, void *item)
1962 {
1963 	ASSERT(avl);
1964 	ASSERT(item);
1965 
1966 	if (avl->avl_nops->avln_rele(item))
1967 		avl->avl_nops->avln_destroy(item);
1968 
1969 	smb_avl_rele(avl);
1970 }
1971 
1972 /*
1973  * Initializes the given cursor for the AVL.
1974  * The cursor will be used to iterate through the AVL
1975  */
1976 void
1977 smb_avl_iterinit(smb_avl_t *avl, smb_avl_cursor_t *cursor)
1978 {
1979 	ASSERT(avl);
1980 	ASSERT(cursor);
1981 
1982 	cursor->avlc_next = NULL;
1983 	cursor->avlc_sequence = avl->avl_sequence;
1984 }
1985 
1986 /*
1987  * Iterates through the AVL using the given cursor.
1988  * It always starts at the beginning and then returns
1989  * a pointer to the next object on each subsequent call.
1990  *
1991  * If a new object is added to or removed from the AVL
1992  * between two calls to this function, the iteration
1993  * will terminate prematurely.
1994  *
1995  * The caller MUST always call smb_avl_release() after it's
1996  * done using the returned object to release the hold taken
1997  * on the object.
1998  */
1999 void *
2000 smb_avl_iterate(smb_avl_t *avl, smb_avl_cursor_t *cursor)
2001 {
2002 	void *node;
2003 
2004 	ASSERT(avl);
2005 	ASSERT(cursor);
2006 
2007 	if (!smb_avl_hold(avl))
2008 		return (NULL);
2009 
2010 	rw_enter(&avl->avl_lock, RW_READER);
2011 	if (cursor->avlc_sequence != avl->avl_sequence) {
2012 		rw_exit(&avl->avl_lock);
2013 		smb_avl_rele(avl);
2014 		return (NULL);
2015 	}
2016 
2017 	if (cursor->avlc_next == NULL)
2018 		node = avl_first(&avl->avl_tree);
2019 	else
2020 		node = AVL_NEXT(&avl->avl_tree, cursor->avlc_next);
2021 
2022 	if (node != NULL)
2023 		avl->avl_nops->avln_hold(node);
2024 
2025 	cursor->avlc_next = node;
2026 	rw_exit(&avl->avl_lock);
2027 
2028 	if (node == NULL)
2029 		smb_avl_rele(avl);
2030 
2031 	return (node);
2032 }
2033 
2034 /*
2035  * Increments the AVL reference count in order to
2036  * prevent the avl from being destroyed while it's
2037  * being accessed.
2038  */
2039 static boolean_t
2040 smb_avl_hold(smb_avl_t *avl)
2041 {
2042 	mutex_enter(&avl->avl_mutex);
2043 	if (avl->avl_state != SMB_AVL_STATE_READY) {
2044 		mutex_exit(&avl->avl_mutex);
2045 		return (B_FALSE);
2046 	}
2047 	avl->avl_refcnt++;
2048 	mutex_exit(&avl->avl_mutex);
2049 
2050 	return (B_TRUE);
2051 }
2052 
/*
 * Decrements the AVL reference count to release the
 * hold. If another thread is trying to destroy the
 * AVL and is waiting for the reference count to become
 * 0, it is signaled to wake up.
 */
static void
smb_avl_rele(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	ASSERT(avl->avl_refcnt > 0);
	avl->avl_refcnt--;
	/* smb_avl_destroy() waits on avl_cv for the refcnt to drain. */
	if (avl->avl_state == SMB_AVL_STATE_DESTROYING)
		cv_broadcast(&avl->avl_cv);
	mutex_exit(&avl->avl_mutex);
}
2069 
/*
 * smb_latency_init
 *
 * Zeroes the latency statistics and initializes the protecting
 * spin mutex (high-interrupt level, hence MUTEX_SPIN at SPL7).
 */
void
smb_latency_init(smb_latency_t *lat)
{
	bzero(lat, sizeof (*lat));
	mutex_init(&lat->ly_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
}
2079 
/*
 * smb_latency_destroy
 *
 * Releases the mutex created by smb_latency_init().
 */
void
smb_latency_destroy(smb_latency_t *lat)
{
	mutex_destroy(&lat->ly_mutex);
}
2088 
/*
 * smb_latency_add_sample
 *
 * Uses the new sample to calculate the new mean and standard deviation. The
 * sample must be a scaled value.
 *
 * Two parallel sets of statistics are kept: the "a" (absolute/lifetime)
 * set and the "d" (delta/interval) set; both are updated identically here.
 */
void
smb_latency_add_sample(smb_latency_t *lat, hrtime_t sample)
{
	hrtime_t	a_mean;
	hrtime_t	d_mean;

	mutex_enter(&lat->ly_mutex);
	lat->ly_a_nreq++;
	lat->ly_a_sum += sample;
	/* Guard against nreq wrapping to zero before dividing. */
	if (lat->ly_a_nreq != 0) {
		a_mean = lat->ly_a_sum / lat->ly_a_nreq;
		/*
		 * NOTE(review): this assigns (not accumulates) the latest
		 * squared-deviation term, mixing the new and previous means,
		 * so ly_a_stddev is not a running variance sum — confirm
		 * whether downstream kstat consumers expect exactly this.
		 */
		lat->ly_a_stddev =
		    (sample - a_mean) * (sample - lat->ly_a_mean);
		lat->ly_a_mean = a_mean;
	}
	lat->ly_d_nreq++;
	lat->ly_d_sum += sample;
	if (lat->ly_d_nreq != 0) {
		d_mean = lat->ly_d_sum / lat->ly_d_nreq;
		lat->ly_d_stddev =
		    (sample - d_mean) * (sample - lat->ly_d_mean);
		lat->ly_d_mean = d_mean;
	}
	mutex_exit(&lat->ly_mutex);
}
2120 
/*
 * smb_srqueue_init
 *
 * Zeroes the wait-queue/run-queue statistics, initializes the spin
 * mutex, and stamps both queues with the current (unscaled) time so
 * the first delta computed by the update routines is meaningful.
 */
void
smb_srqueue_init(smb_srqueue_t *srq)
{
	bzero(srq, sizeof (*srq));
	mutex_init(&srq->srq_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
	srq->srq_wlastupdate = srq->srq_rlastupdate = gethrtime_unscaled();
}
2131 
/*
 * smb_srqueue_destroy
 *
 * Releases the mutex created by smb_srqueue_init().
 */
void
smb_srqueue_destroy(smb_srqueue_t *srq)
{
	mutex_destroy(&srq->srq_mutex);
}
2140 
2141 /*
2142  * smb_srqueue_waitq_enter
2143  */
2144 void
2145 smb_srqueue_waitq_enter(smb_srqueue_t *srq)
2146 {
2147 	hrtime_t	new;
2148 	hrtime_t	delta;
2149 	uint32_t	wcnt;
2150 
2151 	mutex_enter(&srq->srq_mutex);
2152 	new = gethrtime_unscaled();
2153 	delta = new - srq->srq_wlastupdate;
2154 	srq->srq_wlastupdate = new;
2155 	wcnt = srq->srq_wcnt++;
2156 	if (wcnt != 0) {
2157 		srq->srq_wlentime += delta * wcnt;
2158 		srq->srq_wtime += delta;
2159 	}
2160 	mutex_exit(&srq->srq_mutex);
2161 }
2162 
2163 /*
2164  * smb_srqueue_runq_exit
2165  */
2166 void
2167 smb_srqueue_runq_exit(smb_srqueue_t *srq)
2168 {
2169 	hrtime_t	new;
2170 	hrtime_t	delta;
2171 	uint32_t	rcnt;
2172 
2173 	mutex_enter(&srq->srq_mutex);
2174 	new = gethrtime_unscaled();
2175 	delta = new - srq->srq_rlastupdate;
2176 	srq->srq_rlastupdate = new;
2177 	rcnt = srq->srq_rcnt--;
2178 	ASSERT(rcnt > 0);
2179 	srq->srq_rlentime += delta * rcnt;
2180 	srq->srq_rtime += delta;
2181 	mutex_exit(&srq->srq_mutex);
2182 }
2183 
2184 /*
2185  * smb_srqueue_waitq_to_runq
2186  */
2187 void
2188 smb_srqueue_waitq_to_runq(smb_srqueue_t *srq)
2189 {
2190 	hrtime_t	new;
2191 	hrtime_t	delta;
2192 	uint32_t	wcnt;
2193 	uint32_t	rcnt;
2194 
2195 	mutex_enter(&srq->srq_mutex);
2196 	new = gethrtime_unscaled();
2197 	delta = new - srq->srq_wlastupdate;
2198 	srq->srq_wlastupdate = new;
2199 	wcnt = srq->srq_wcnt--;
2200 	ASSERT(wcnt > 0);
2201 	srq->srq_wlentime += delta * wcnt;
2202 	srq->srq_wtime += delta;
2203 	delta = new - srq->srq_rlastupdate;
2204 	srq->srq_rlastupdate = new;
2205 	rcnt = srq->srq_rcnt++;
2206 	if (rcnt != 0) {
2207 		srq->srq_rlentime += delta * rcnt;
2208 		srq->srq_rtime += delta;
2209 	}
2210 	mutex_exit(&srq->srq_mutex);
2211 }
2212 
2213 /*
2214  * smb_srqueue_update
2215  *
2216  * Takes a snapshot of the smb_sr_stat_t structure passed in.
2217  */
2218 void
2219 smb_srqueue_update(smb_srqueue_t *srq, smb_kstat_utilization_t *kd)
2220 {
2221 	hrtime_t	delta;
2222 	hrtime_t	snaptime;
2223 
2224 	mutex_enter(&srq->srq_mutex);
2225 	snaptime = gethrtime_unscaled();
2226 	delta = snaptime - srq->srq_wlastupdate;
2227 	srq->srq_wlastupdate = snaptime;
2228 	if (srq->srq_wcnt != 0) {
2229 		srq->srq_wlentime += delta * srq->srq_wcnt;
2230 		srq->srq_wtime += delta;
2231 	}
2232 	delta = snaptime - srq->srq_rlastupdate;
2233 	srq->srq_rlastupdate = snaptime;
2234 	if (srq->srq_rcnt != 0) {
2235 		srq->srq_rlentime += delta * srq->srq_rcnt;
2236 		srq->srq_rtime += delta;
2237 	}
2238 	kd->ku_rlentime = srq->srq_rlentime;
2239 	kd->ku_rtime = srq->srq_rtime;
2240 	kd->ku_wlentime = srq->srq_wlentime;
2241 	kd->ku_wtime = srq->srq_wtime;
2242 	mutex_exit(&srq->srq_mutex);
2243 	scalehrtime(&kd->ku_rlentime);
2244 	scalehrtime(&kd->ku_rtime);
2245 	scalehrtime(&kd->ku_wlentime);
2246 	scalehrtime(&kd->ku_wtime);
2247 }
2248 
/*
 * smb_threshold_init
 *
 * Initializes a command threshold: requests beyond 'threshold'
 * concurrent executions of 'cmd' block on the associated server
 * event for at most 'timeout'.  See smb_threshold_enter().
 */
void
smb_threshold_init(smb_cmd_threshold_t *ct, smb_server_t *sv, char *cmd,
    int threshold, int timeout)
{
	bzero(ct, sizeof (smb_cmd_threshold_t));
	mutex_init(&ct->ct_mutex, NULL, MUTEX_DEFAULT, NULL);
	ct->ct_cmd = cmd;
	ct->ct_threshold = threshold;
	/* Stalled requests wait on this event, keyed by its txid. */
	ct->ct_event = smb_event_create(sv, timeout);
	ct->ct_event_id = smb_event_txid(ct->ct_event);

	if (smb_threshold_debug) {
		cmn_err(CE_NOTE, "smb_threshold_init[%s]: threshold (%d), "
		    "timeout (%d)", cmd, threshold, timeout);
	}
}
2265 
/*
 * This function must be called prior to SMB_SERVER_STATE_STOPPING state
 * so that ct_event can be successfully removed from the event list.
 * It should not be called when the server mutex is held or when the
 * server is removed from the server list.
 */
void
smb_threshold_fini(smb_cmd_threshold_t *ct)
{
	smb_event_destroy(ct->ct_event);
	mutex_destroy(&ct->ct_mutex);
	bzero(ct, sizeof (smb_cmd_threshold_t));
}
2279 
/*
 * This threshold mechanism can be used to limit the number of simultaneous
 * requests, which serves to limit the stress that can be applied to the
 * service and also allows the service to respond to requests before the
 * client times out and reports that the server is not responding.
 *
 * If the number of requests exceeds the threshold, new requests will be
 * stalled until the number drops back to the threshold.  Stalled requests
 * will be notified as appropriate, in which case 0 will be returned.
 * If the timeout expires before the request is notified, a non-zero errno
 * value will be returned.
 *
 * To avoid a flood of messages, the message rate is throttled as well.
 *
 * Returns 0 on success (the caller may proceed and must later call
 * smb_threshold_exit()), or a non-zero errno from smb_event_wait().
 */
int
smb_threshold_enter(smb_cmd_threshold_t *ct)
{
	int	rc;

	mutex_enter(&ct->ct_mutex);
	if (ct->ct_active_cnt >= ct->ct_threshold && ct->ct_event != NULL) {
		atomic_inc_32(&ct->ct_blocked_cnt);

		if (smb_threshold_debug) {
			cmn_err(CE_NOTE, "smb_threshold_enter[%s]: blocked "
			    "(blocked ops: %u, inflight ops: %u)",
			    ct->ct_cmd, ct->ct_blocked_cnt, ct->ct_active_cnt);
		}

		/* Drop the mutex while sleeping on the event. */
		mutex_exit(&ct->ct_mutex);

		if ((rc = smb_event_wait(ct->ct_event)) != 0) {
			/* Cancellation: give up immediately. */
			if (rc == ECANCELED)
				return (rc);

			/* Timed out: fail only if still over the threshold. */
			mutex_enter(&ct->ct_mutex);
			if (ct->ct_active_cnt >= ct->ct_threshold) {

				/* Throttle the "server busy" log messages. */
				if ((ct->ct_error_cnt %
				    SMB_THRESHOLD_REPORT_THROTTLE) == 0) {
					cmn_err(CE_NOTE, "%s: server busy: "
					    "threshold %d exceeded)",
					    ct->ct_cmd, ct->ct_threshold);
				}

				atomic_inc_32(&ct->ct_error_cnt);
				mutex_exit(&ct->ct_mutex);
				return (rc);
			}

			mutex_exit(&ct->ct_mutex);

		}

		/* Resumed below threshold: no longer counted as blocked. */
		mutex_enter(&ct->ct_mutex);
		atomic_dec_32(&ct->ct_blocked_cnt);
		if (smb_threshold_debug) {
			cmn_err(CE_NOTE, "smb_threshold_enter[%s]: resumed "
			    "(blocked ops: %u, inflight ops: %u)", ct->ct_cmd,
			    ct->ct_blocked_cnt, ct->ct_active_cnt);
		}
	}

	atomic_inc_32(&ct->ct_active_cnt);
	mutex_exit(&ct->ct_mutex);
	return (0);
}
2347 
/*
 * smb_threshold_exit
 *
 * Marks completion of a request admitted by smb_threshold_enter():
 * drops the active count and notifies any stalled waiters via the
 * threshold's server event.
 */
void
smb_threshold_exit(smb_cmd_threshold_t *ct, smb_server_t *sv)
{
	mutex_enter(&ct->ct_mutex);
	atomic_dec_32(&ct->ct_active_cnt);
	mutex_exit(&ct->ct_mutex);
	smb_event_notify(sv, ct->ct_event_id);
}
2356