xref: /titanic_41/usr/src/uts/common/fs/smbsrv/smb_kutil.c (revision f3312ec0e8acbd249df97358fb8c3ca92f4e089c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
25  */
26 
27 #include <sys/param.h>
28 #include <sys/types.h>
29 #include <sys/tzfile.h>
30 #include <sys/atomic.h>
31 #include <sys/kidmap.h>
32 #include <sys/time.h>
33 #include <sys/spl.h>
34 #include <sys/cpuvar.h>
35 #include <sys/random.h>
36 #include <smbsrv/smb_kproto.h>
37 #include <smbsrv/smb_fsops.h>
38 #include <smbsrv/smbinfo.h>
39 #include <smbsrv/smb_xdr.h>
40 #include <smbsrv/smb_vops.h>
41 #include <smbsrv/smb_idmap.h>
42 
43 #include <sys/sid.h>
44 #include <sys/priv_names.h>
45 
46 static kmem_cache_t	*smb_dtor_cache;
47 static boolean_t	smb_llist_initialized = B_FALSE;
48 
49 static boolean_t smb_thread_continue_timedwait_locked(smb_thread_t *, int);
50 
51 static boolean_t smb_avl_hold(smb_avl_t *);
52 static void smb_avl_rele(smb_avl_t *);
53 
54 time_t tzh_leapcnt = 0;
55 
56 struct tm
57 *smb_gmtime_r(time_t *clock, struct tm *result);
58 
59 time_t
60 smb_timegm(struct tm *tm);
61 
62 struct	tm {
63 	int	tm_sec;
64 	int	tm_min;
65 	int	tm_hour;
66 	int	tm_mday;
67 	int	tm_mon;
68 	int	tm_year;
69 	int	tm_wday;
70 	int	tm_yday;
71 	int	tm_isdst;
72 };
73 
74 static int days_in_month[] = {
75 	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
76 };
77 
78 int
79 smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
80 {
81 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
82 		return (smb_wcequiv_strlen(str));
83 	return (strlen(str));
84 }
85 
86 int
87 smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
88 {
89 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
90 		return (smb_wcequiv_strlen(str) + 2);
91 	return (strlen(str) + 1);
92 }
93 
94 int
95 smb_ascii_or_unicode_null_len(struct smb_request *sr)
96 {
97 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
98 		return (2);
99 	return (1);
100 }
101 
102 /*
103  *
104  * Convert old-style (DOS, LanMan) wildcard strings to NT style.
105  * This should ONLY happen to patterns that come from old clients,
106  * meaning dialect LANMAN2_1 etc. (dialect < NT_LM_0_12).
107  *
108  *	? is converted to >
109  *	* is converted to < if it is followed by .
110  *	. is converted to " if it is followed by ? or * or end of pattern
111  *
112  * Note: modifies pattern in place.
113  */
114 void
115 smb_convert_wildcards(char *pattern)
116 {
117 	char	*p;
118 
119 	for (p = pattern; *p != '\0'; p++) {
120 		switch (*p) {
121 		case '?':
122 			*p = '>';
123 			break;
124 		case '*':
125 			if (p[1] == '.')
126 				*p = '<';
127 			break;
128 		case '.':
129 			if (p[1] == '?' || p[1] == '*' || p[1] == '\0')
130 				*p = '\"';
131 			break;
132 		}
133 	}
134 }
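
/*
 * Illustrative sketch, not part of the build: the SMB_KUTIL_EXAMPLE
 * guard and the example function below are hypothetical, shown only to
 * document the in-place transformation performed by
 * smb_convert_wildcards().
 */
#ifdef SMB_KUTIL_EXAMPLE
static void
smb_convert_wildcards_example(void)
{
	char pattern[] = "*.?";

	/* '*' precedes '.', '.' precedes '?', and '?' always converts */
	smb_convert_wildcards(pattern);
	ASSERT(strcmp(pattern, "<\">") == 0);
}
#endif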
135 
136 /*
137  * smb_sattr_check
138  *
139  * Check file attributes against a search attribute (sattr) mask.
140  *
141  * Normal files, which includes READONLY and ARCHIVE, always pass
142  * this check.  If the DIRECTORY, HIDDEN or SYSTEM special attributes
143  * are set then they must appear in the search mask.  The special
144  * attributes are inclusive, i.e. all special attributes that appear
145  * in sattr must also appear in the file attributes for the check to
146  * pass.
147  *
148  * The following examples show how this works:
149  *
150  *		fileA:	READONLY
151  *		fileB:	0 (no attributes = normal file)
152  *		fileC:	READONLY, ARCHIVE
153  *		fileD:	HIDDEN
154  *		fileE:	READONLY, HIDDEN, SYSTEM
155  *		dirA:	DIRECTORY
156  *
157  * search attribute: 0
158  *		Returns: fileA, fileB and fileC.
159  * search attribute: HIDDEN
160  *		Returns: fileA, fileB, fileC and fileD.
161  * search attribute: SYSTEM
162  *		Returns: fileA, fileB and fileC.
163  * search attribute: DIRECTORY
164  *		Returns: fileA, fileB, fileC and dirA.
165  * search attribute: HIDDEN and SYSTEM
166  *		Returns: fileA, fileB, fileC, fileD and fileE.
167  *
168  * Returns true if the file and sattr match; otherwise, returns false.
169  */
170 boolean_t
171 smb_sattr_check(uint16_t dosattr, uint16_t sattr)
172 {
173 	if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
174 	    !(sattr & FILE_ATTRIBUTE_DIRECTORY))
175 		return (B_FALSE);
176 
177 	if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
178 	    !(sattr & FILE_ATTRIBUTE_HIDDEN))
179 		return (B_FALSE);
180 
181 	if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
182 	    !(sattr & FILE_ATTRIBUTE_SYSTEM))
183 		return (B_FALSE);
184 
185 	return (B_TRUE);
186 }
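
/*
 * Illustrative sketch, not part of the build: a hypothetical example
 * (guarded by SMB_KUTIL_EXAMPLE) restating the table above in code.
 */
#ifdef SMB_KUTIL_EXAMPLE
static void
smb_sattr_check_example(void)
{
	/* Normal files (READONLY and/or ARCHIVE) always pass. */
	ASSERT(smb_sattr_check(FILE_ATTRIBUTE_READONLY, 0));

	/* A hidden file passes only if the search mask includes HIDDEN. */
	ASSERT(!smb_sattr_check(FILE_ATTRIBUTE_HIDDEN, 0));
	ASSERT(smb_sattr_check(FILE_ATTRIBUTE_HIDDEN, FILE_ATTRIBUTE_HIDDEN));
}
#endif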
187 
188 int
189 microtime(timestruc_t *tvp)
190 {
191 	tvp->tv_sec = gethrestime_sec();
192 	tvp->tv_nsec = 0;
193 	return (0);
194 }
195 
196 int32_t
197 clock_get_milli_uptime()
198 {
199 	return (TICK_TO_MSEC(ddi_get_lbolt()));
200 }
201 
202 int /*ARGSUSED*/
203 smb_noop(void *p, size_t size, int foo)
204 {
205 	return (0);
206 }
207 
208 /*
209  * smb_idpool_increment
210  *
211  * This function grows the ID pool by doubling its current size. This
212  * function assumes the caller has entered the mutex of the pool.
213  */
214 static int
215 smb_idpool_increment(
216     smb_idpool_t	*pool)
217 {
218 	uint8_t		*new_pool;
219 	uint32_t	new_size;
220 
221 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
222 
223 	new_size = pool->id_size * 2;
224 	if (new_size <= SMB_IDPOOL_MAX_SIZE) {
225 		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
226 		if (new_pool) {
227 			bzero(new_pool, new_size / 8);
228 			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
229 			kmem_free(pool->id_pool, pool->id_size / 8);
230 			pool->id_pool = new_pool;
231 			pool->id_free_counter += new_size - pool->id_size;
232 			pool->id_max_free_counter += new_size - pool->id_size;
233 			pool->id_size = new_size;
234 			pool->id_idx_msk = (new_size / 8) - 1;
235 			if (new_size >= SMB_IDPOOL_MAX_SIZE) {
236 				/* id -1 made unavailable */
237 				pool->id_pool[pool->id_idx_msk] = 0x80;
238 				pool->id_free_counter--;
239 				pool->id_max_free_counter--;
240 			}
241 			return (0);
242 		}
243 	}
244 	return (-1);
245 }
246 
247 /*
248  * smb_idpool_constructor
249  *
250  * This function initializes the pool structure provided.
251  */
252 int
253 smb_idpool_constructor(
254     smb_idpool_t	*pool)
255 {
256 
257 	ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);
258 
259 	pool->id_size = SMB_IDPOOL_MIN_SIZE;
260 	pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
261 	pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
262 	pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
263 	pool->id_bit = 0x02;
264 	pool->id_bit_idx = 1;
265 	pool->id_idx = 0;
266 	pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
267 	    KM_SLEEP);
268 	bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
269 	/* -1 id made unavailable */
270 	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
271 	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
272 	pool->id_magic = SMB_IDPOOL_MAGIC;
273 	return (0);
274 }
275 
276 /*
277  * smb_idpool_destructor
278  *
279  * This function tears down and frees the resources associated with the
280  * pool provided.
281  */
282 void
283 smb_idpool_destructor(
284     smb_idpool_t	*pool)
285 {
286 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
287 	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
288 	pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
289 	mutex_destroy(&pool->id_mutex);
290 	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
291 }
292 
293 /*
294  * smb_idpool_alloc
295  *
296  * This function allocates an ID from the pool provided.
297  */
298 int
299 smb_idpool_alloc(
300     smb_idpool_t	*pool,
301     uint16_t		*id)
302 {
303 	uint32_t	i;
304 	uint8_t		bit;
305 	uint8_t		bit_idx;
306 	uint8_t		byte;
307 
308 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
309 
310 	mutex_enter(&pool->id_mutex);
311 	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
312 		mutex_exit(&pool->id_mutex);
313 		return (-1);
314 	}
315 
316 	i = pool->id_size;
317 	while (i) {
318 		bit = pool->id_bit;
319 		bit_idx = pool->id_bit_idx;
320 		byte = pool->id_pool[pool->id_idx];
321 		while (bit) {
322 			if (byte & bit) {
323 				bit = bit << 1;
324 				bit_idx++;
325 				continue;
326 			}
327 			pool->id_pool[pool->id_idx] |= bit;
328 			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
329 			pool->id_free_counter--;
330 			pool->id_bit = bit;
331 			pool->id_bit_idx = bit_idx;
332 			mutex_exit(&pool->id_mutex);
333 			return (0);
334 		}
335 		pool->id_bit = 1;
336 		pool->id_bit_idx = 0;
337 		pool->id_idx++;
338 		pool->id_idx &= pool->id_idx_msk;
339 		--i;
340 	}
341 	/*
342 	 * This section of code shouldn't be reached. If there are IDs
343 	 * available and none could be found there's a problem.
344 	 */
345 	ASSERT(0);
346 	mutex_exit(&pool->id_mutex);
347 	return (-1);
348 }
349 
350 /*
351  * smb_idpool_free
352  *
353  * This function frees the ID provided.
354  */
355 void
356 smb_idpool_free(
357     smb_idpool_t	*pool,
358     uint16_t		id)
359 {
360 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
361 	ASSERT(id != 0);
362 	ASSERT(id != 0xFFFF);
363 
364 	mutex_enter(&pool->id_mutex);
365 	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
366 		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
367 		pool->id_free_counter++;
368 		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
369 		mutex_exit(&pool->id_mutex);
370 		return;
371 	}
372 	/* Freeing a free ID. */
373 	ASSERT(0);
374 	mutex_exit(&pool->id_mutex);
375 }
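
/*
 * Illustrative sketch, not part of the build: the expected life cycle
 * of an ID pool.  The function name and the SMB_KUTIL_EXAMPLE guard
 * are hypothetical.
 */
#ifdef SMB_KUTIL_EXAMPLE
static void
smb_idpool_example(void)
{
	smb_idpool_t	pool;
	uint16_t	id;

	(void) smb_idpool_constructor(&pool);
	if (smb_idpool_alloc(&pool, &id) == 0) {
		/* ids 0 and 0xFFFF are never handed out */
		smb_idpool_free(&pool, id);
	}
	smb_idpool_destructor(&pool);
}
#endif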
376 
377 /*
378  * Initialize the llist delete queue object cache.
379  */
380 void
381 smb_llist_init(void)
382 {
383 	if (smb_llist_initialized)
384 		return;
385 
386 	smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
387 	    sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
388 
389 	smb_llist_initialized = B_TRUE;
390 }
391 
392 /*
393  * Destroy the llist delete queue object cache.
394  */
395 void
396 smb_llist_fini(void)
397 {
398 	if (!smb_llist_initialized)
399 		return;
400 
401 	kmem_cache_destroy(smb_dtor_cache);
402 	smb_llist_initialized = B_FALSE;
403 }
404 
405 /*
406  * smb_llist_constructor
407  *
408  * This function initializes a locked list.
409  */
410 void
411 smb_llist_constructor(
412     smb_llist_t	*ll,
413     size_t	size,
414     size_t	offset)
415 {
416 	rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
417 	mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
418 	list_create(&ll->ll_list, size, offset);
419 	list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
420 	    offsetof(smb_dtor_t, dt_lnd));
421 	ll->ll_count = 0;
422 	ll->ll_wrop = 0;
423 	ll->ll_deleteq_count = 0;
424 	ll->ll_flushing = B_FALSE;
425 }
426 
427 /*
428  * Flush the delete queue and destroy a locked list.
429  */
430 void
431 smb_llist_destructor(
432     smb_llist_t	*ll)
433 {
434 	smb_llist_flush(ll);
435 
436 	ASSERT(ll->ll_count == 0);
437 	ASSERT(ll->ll_deleteq_count == 0);
438 
439 	rw_destroy(&ll->ll_lock);
440 	list_destroy(&ll->ll_list);
441 	list_destroy(&ll->ll_deleteq);
442 	mutex_destroy(&ll->ll_mutex);
443 }
444 
445 /*
446  * Post an object to the delete queue.  The delete queue will be processed
447  * during list exit or list destruction.  Objects are often posted for
448  * deletion during list iteration (while the list is locked) but that is
449  * not required, and an object can be posted at any time.
450  */
451 void
452 smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
453 {
454 	smb_dtor_t	*dtor;
455 
456 	ASSERT((object != NULL) && (dtorproc != NULL));
457 
458 	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
459 	bzero(dtor, sizeof (smb_dtor_t));
460 	dtor->dt_magic = SMB_DTOR_MAGIC;
461 	dtor->dt_object = object;
462 	dtor->dt_proc = dtorproc;
463 
464 	mutex_enter(&ll->ll_mutex);
465 	list_insert_tail(&ll->ll_deleteq, dtor);
466 	++ll->ll_deleteq_count;
467 	mutex_exit(&ll->ll_mutex);
468 }
469 
470 /*
471  * Exit the list lock and process the delete queue.
472  */
473 void
474 smb_llist_exit(smb_llist_t *ll)
475 {
476 	rw_exit(&ll->ll_lock);
477 	smb_llist_flush(ll);
478 }
479 
480 /*
481  * Flush the list delete queue.  The mutex is dropped across the destructor
482  * call in case this leads to additional objects being posted to the delete
483  * queue.
484  */
485 void
486 smb_llist_flush(smb_llist_t *ll)
487 {
488 	smb_dtor_t    *dtor;
489 
490 	mutex_enter(&ll->ll_mutex);
491 	if (ll->ll_flushing) {
492 		mutex_exit(&ll->ll_mutex);
493 		return;
494 	}
495 	ll->ll_flushing = B_TRUE;
496 
497 	dtor = list_head(&ll->ll_deleteq);
498 	while (dtor != NULL) {
499 		SMB_DTOR_VALID(dtor);
500 		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
501 		list_remove(&ll->ll_deleteq, dtor);
502 		--ll->ll_deleteq_count;
503 		mutex_exit(&ll->ll_mutex);
504 
505 		dtor->dt_proc(dtor->dt_object);
506 
507 		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
508 		kmem_cache_free(smb_dtor_cache, dtor);
509 		mutex_enter(&ll->ll_mutex);
510 		dtor = list_head(&ll->ll_deleteq);
511 	}
512 	ll->ll_flushing = B_FALSE;
513 
514 	mutex_exit(&ll->ll_mutex);
515 }
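
/*
 * Illustrative sketch, not part of the build: a minimal locked-list
 * life cycle showing deferred deletion through the delete queue.  The
 * my_node_t type, my_node_dtor() and the SMB_KUTIL_EXAMPLE guard are
 * hypothetical.
 */
#ifdef SMB_KUTIL_EXAMPLE
typedef struct my_node {
	list_node_t	mn_lnd;		/* linkage for list_create() */
	int		mn_data;
} my_node_t;

static void
my_node_dtor(void *object)
{
	kmem_free(object, sizeof (my_node_t));
}

static void
smb_llist_example(void)
{
	smb_llist_t	ll;
	my_node_t	*node;

	smb_llist_constructor(&ll, sizeof (my_node_t),
	    offsetof(my_node_t, mn_lnd));

	node = kmem_zalloc(sizeof (my_node_t), KM_SLEEP);
	rw_enter(&ll.ll_lock, RW_WRITER);
	smb_llist_insert_tail(&ll, node);
	smb_llist_remove(&ll, node);
	smb_llist_post(&ll, node, my_node_dtor);
	smb_llist_exit(&ll);		/* drops ll_lock, flushes queue */

	smb_llist_destructor(&ll);
}
#endif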
516 
517 /*
518  * smb_llist_upgrade
519  *
520  * This function tries to upgrade the lock of the locked list. It assumes the
521  * lock has already been entered in RW_READER mode. It first tries using the
522  * Solaris function rw_tryupgrade(). If that call fails the lock is released
523  * and reentered in RW_WRITER mode. In that last case a window is opened during
524  * which the contents of the list may have changed. The return code indicates
525  * whether or not the list was modified when the lock was exited.
526  */
527 int
528 smb_llist_upgrade(smb_llist_t *ll)
529 {
530 	uint64_t	wrop;
531 
532 	if (rw_tryupgrade(&ll->ll_lock) != 0) {
533 		return (0);
534 	}
535 	wrop = ll->ll_wrop;
536 	rw_exit(&ll->ll_lock);
537 	rw_enter(&ll->ll_lock, RW_WRITER);
538 	return (wrop != ll->ll_wrop);
539 }
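
/*
 * Illustrative sketch, not part of the build: a hypothetical caller
 * (guarded by SMB_KUTIL_EXAMPLE) showing why the return value of
 * smb_llist_upgrade() matters.
 */
#ifdef SMB_KUTIL_EXAMPLE
static void
smb_llist_upgrade_example(smb_llist_t *ll)
{
	rw_enter(&ll->ll_lock, RW_READER);
	/* ... locate an element under the reader lock ... */
	if (smb_llist_upgrade(ll) != 0) {
		/*
		 * The lock was dropped and re-entered as writer; the
		 * list may have changed, so any element located above
		 * must be looked up again before use.
		 */
	}
	/* ... modify the list under the writer lock ... */
	smb_llist_exit(ll);
}
#endif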
540 
541 /*
542  * smb_llist_insert_head
543  *
544  * This function inserts the object passed at the beginning of the list. This
545  * function assumes the lock of the list has already been entered.
546  */
547 void
548 smb_llist_insert_head(
549     smb_llist_t	*ll,
550     void	*obj)
551 {
552 	list_insert_head(&ll->ll_list, obj);
553 	++ll->ll_wrop;
554 	++ll->ll_count;
555 }
556 
557 /*
558  * smb_llist_insert_tail
559  *
560  * This function appends the object passed to the list. This function
561  * assumes the lock of the list has already been entered.
563  */
564 void
565 smb_llist_insert_tail(
566     smb_llist_t	*ll,
567     void	*obj)
568 {
569 	list_insert_tail(&ll->ll_list, obj);
570 	++ll->ll_wrop;
571 	++ll->ll_count;
572 }
573 
574 /*
575  * smb_llist_remove
576  *
577  * This function removes the object passed from the list. This function assumes
578  * the lock of the list has already been entered.
579  */
580 void
581 smb_llist_remove(
582     smb_llist_t	*ll,
583     void	*obj)
584 {
585 	list_remove(&ll->ll_list, obj);
586 	++ll->ll_wrop;
587 	--ll->ll_count;
588 }
589 
590 /*
591  * smb_llist_get_count
592  *
593  * This function returns the number of elements in the specified list.
594  */
595 uint32_t
596 smb_llist_get_count(
597     smb_llist_t *ll)
598 {
599 	return (ll->ll_count);
600 }
601 
602 /*
603  * smb_slist_constructor
604  *
605  * Synchronized list constructor.
606  */
607 void
608 smb_slist_constructor(
609     smb_slist_t	*sl,
610     size_t	size,
611     size_t	offset)
612 {
613 	mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
614 	cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
615 	list_create(&sl->sl_list, size, offset);
616 	sl->sl_count = 0;
617 	sl->sl_waiting = B_FALSE;
618 }
619 
620 /*
621  * smb_slist_destructor
622  *
623  * Synchronized list destructor.
624  */
625 void
626 smb_slist_destructor(
627     smb_slist_t	*sl)
628 {
629 	VERIFY(sl->sl_count == 0);
630 
631 	mutex_destroy(&sl->sl_mutex);
632 	cv_destroy(&sl->sl_cv);
633 	list_destroy(&sl->sl_list);
634 }
635 
636 /*
637  * smb_slist_insert_head
638  *
639  * This function inserts the object passed at the beginning of the list.
640  */
641 void
642 smb_slist_insert_head(
643     smb_slist_t	*sl,
644     void	*obj)
645 {
646 	mutex_enter(&sl->sl_mutex);
647 	list_insert_head(&sl->sl_list, obj);
648 	++sl->sl_count;
649 	mutex_exit(&sl->sl_mutex);
650 }
651 
652 /*
653  * smb_slist_insert_tail
654  *
655  * This function appends the object passed to the list.
656  */
657 void
658 smb_slist_insert_tail(
659     smb_slist_t	*sl,
660     void	*obj)
661 {
662 	mutex_enter(&sl->sl_mutex);
663 	list_insert_tail(&sl->sl_list, obj);
664 	++sl->sl_count;
665 	mutex_exit(&sl->sl_mutex);
666 }
667 
668 /*
669  * smb_slist_remove
670  *
671  * This function removes the object passed by the caller from the list.
672  */
673 void
674 smb_slist_remove(
675     smb_slist_t	*sl,
676     void	*obj)
677 {
678 	mutex_enter(&sl->sl_mutex);
679 	list_remove(&sl->sl_list, obj);
680 	if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
681 		sl->sl_waiting = B_FALSE;
682 		cv_broadcast(&sl->sl_cv);
683 	}
684 	mutex_exit(&sl->sl_mutex);
685 }
686 
687 /*
688  * smb_slist_move_tail
689  *
690  * This function transfers all the contents of the synchronized list to the
691  * list_t provided. It returns the number of objects transferred.
692  */
693 uint32_t
694 smb_slist_move_tail(
695     list_t	*lst,
696     smb_slist_t	*sl)
697 {
698 	uint32_t	rv;
699 
700 	mutex_enter(&sl->sl_mutex);
701 	rv = sl->sl_count;
702 	if (sl->sl_count) {
703 		list_move_tail(lst, &sl->sl_list);
704 		sl->sl_count = 0;
705 		if (sl->sl_waiting) {
706 			sl->sl_waiting = B_FALSE;
707 			cv_broadcast(&sl->sl_cv);
708 		}
709 	}
710 	mutex_exit(&sl->sl_mutex);
711 	return (rv);
712 }
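
/*
 * Illustrative sketch, not part of the build: draining a synchronized
 * list in one shot so the objects can be processed without holding
 * sl_mutex.  The example function and the SMB_KUTIL_EXAMPLE guard are
 * hypothetical.
 */
#ifdef SMB_KUTIL_EXAMPLE
static void
smb_slist_drain_example(smb_slist_t *sl, size_t size, size_t offset)
{
	list_t	local;
	void	*obj;

	list_create(&local, size, offset);
	(void) smb_slist_move_tail(&local, sl);
	while ((obj = list_remove_head(&local)) != NULL) {
		/* ... process obj with no lock held ... */
	}
	list_destroy(&local);
}
#endif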
713 
714 /*
715  * smb_slist_obj_move
716  *
717  * This function moves an object from one list to the end of the other list. It
718  * assumes the mutex of each list has been entered.
719  */
720 void
721 smb_slist_obj_move(
722     smb_slist_t	*dst,
723     smb_slist_t	*src,
724     void	*obj)
725 {
726 	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
727 	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);
728 
729 	list_remove(&src->sl_list, obj);
730 	list_insert_tail(&dst->sl_list, obj);
731 	dst->sl_count++;
732 	src->sl_count--;
733 	if ((src->sl_count == 0) && (src->sl_waiting)) {
734 		src->sl_waiting = B_FALSE;
735 		cv_broadcast(&src->sl_cv);
736 	}
737 }
738 
739 /*
740  * smb_slist_wait_for_empty
741  *
742  * This function waits for a list to be emptied.
743  */
744 void
745 smb_slist_wait_for_empty(
746     smb_slist_t	*sl)
747 {
748 	mutex_enter(&sl->sl_mutex);
749 	while (sl->sl_count) {
750 		sl->sl_waiting = B_TRUE;
751 		cv_wait(&sl->sl_cv, &sl->sl_mutex);
752 	}
753 	mutex_exit(&sl->sl_mutex);
754 }
755 
756 /*
757  * smb_slist_exit
758  *
759  * This function exits the mutex of the list and signals the condition
760  * variable if the list is empty.
761  */
762 void
763 smb_slist_exit(smb_slist_t *sl)
764 {
765 	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
766 		sl->sl_waiting = B_FALSE;
767 		cv_broadcast(&sl->sl_cv);
768 	}
769 	mutex_exit(&sl->sl_mutex);
770 }
771 
772 /*
773  * smb_thread_entry_point
774  *
775  * Common entry point for all the threads created through smb_thread_start.
776  * The state of the thread is set to "running" at the beginning and moved to
777  * "exiting" just before calling thread_exit(). The condition variable is
778  * also signaled.
779  */
780 static void
781 smb_thread_entry_point(
782     smb_thread_t	*thread)
783 {
784 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
785 	mutex_enter(&thread->sth_mtx);
786 	ASSERT(thread->sth_state == SMB_THREAD_STATE_STARTING);
787 	thread->sth_th = curthread;
788 	thread->sth_did = thread->sth_th->t_did;
789 
790 	if (!thread->sth_kill) {
791 		thread->sth_state = SMB_THREAD_STATE_RUNNING;
792 		cv_signal(&thread->sth_cv);
793 		mutex_exit(&thread->sth_mtx);
794 		thread->sth_ep(thread, thread->sth_ep_arg);
795 		mutex_enter(&thread->sth_mtx);
796 	}
797 	thread->sth_th = NULL;
798 	thread->sth_state = SMB_THREAD_STATE_EXITING;
799 	cv_broadcast(&thread->sth_cv);
800 	mutex_exit(&thread->sth_mtx);
801 	thread_exit();
802 }
803 
804 /*
805  * smb_thread_init
806  */
807 void
808 smb_thread_init(
809     smb_thread_t	*thread,
810     char		*name,
811     smb_thread_ep_t	ep,
812     void		*ep_arg)
813 {
814 	ASSERT(thread->sth_magic != SMB_THREAD_MAGIC);
815 
816 	bzero(thread, sizeof (*thread));
817 
818 	(void) strlcpy(thread->sth_name, name, sizeof (thread->sth_name));
819 	thread->sth_ep = ep;
820 	thread->sth_ep_arg = ep_arg;
821 	thread->sth_state = SMB_THREAD_STATE_EXITED;
822 	mutex_init(&thread->sth_mtx, NULL, MUTEX_DEFAULT, NULL);
823 	cv_init(&thread->sth_cv, NULL, CV_DEFAULT, NULL);
824 	thread->sth_magic = SMB_THREAD_MAGIC;
825 }
826 
827 /*
828  * smb_thread_destroy
829  */
830 void
831 smb_thread_destroy(
832     smb_thread_t	*thread)
833 {
834 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
835 	ASSERT(thread->sth_state == SMB_THREAD_STATE_EXITED);
836 	thread->sth_magic = 0;
837 	mutex_destroy(&thread->sth_mtx);
838 	cv_destroy(&thread->sth_cv);
839 }
840 
841 /*
842  * smb_thread_start
843  *
844  * This function starts a thread with the parameters provided. It waits until
845  * the state of the thread has been moved to running.
846  */
847 /*ARGSUSED*/
848 int
849 smb_thread_start(
850     smb_thread_t	*thread)
851 {
852 	int		rc = 0;
853 	kthread_t	*tmpthread;
854 
855 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
856 
857 	mutex_enter(&thread->sth_mtx);
858 	switch (thread->sth_state) {
859 	case SMB_THREAD_STATE_EXITED:
860 		thread->sth_state = SMB_THREAD_STATE_STARTING;
861 		mutex_exit(&thread->sth_mtx);
862 		tmpthread = thread_create(NULL, 0, smb_thread_entry_point,
863 		    thread, 0, &p0, TS_RUN, minclsyspri);
864 		ASSERT(tmpthread != NULL);
865 		mutex_enter(&thread->sth_mtx);
866 		while (thread->sth_state == SMB_THREAD_STATE_STARTING)
867 			cv_wait(&thread->sth_cv, &thread->sth_mtx);
868 		if (thread->sth_state != SMB_THREAD_STATE_RUNNING)
869 			rc = -1;
870 		break;
871 	default:
872 		ASSERT(0);
873 		rc = -1;
874 		break;
875 	}
876 	mutex_exit(&thread->sth_mtx);
877 	return (rc);
878 }
879 
880 /*
881  * smb_thread_stop
882  *
883  * This function signals a thread to kill itself and waits until the "exiting"
884  * state has been reached.
885  */
886 void
887 smb_thread_stop(smb_thread_t *thread)
888 {
889 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
890 
891 	mutex_enter(&thread->sth_mtx);
892 	switch (thread->sth_state) {
893 	case SMB_THREAD_STATE_RUNNING:
894 	case SMB_THREAD_STATE_STARTING:
895 		if (!thread->sth_kill) {
896 			thread->sth_kill = B_TRUE;
897 			cv_broadcast(&thread->sth_cv);
898 			while (thread->sth_state != SMB_THREAD_STATE_EXITING)
899 				cv_wait(&thread->sth_cv, &thread->sth_mtx);
900 			mutex_exit(&thread->sth_mtx);
901 			thread_join(thread->sth_did);
902 			mutex_enter(&thread->sth_mtx);
903 			thread->sth_state = SMB_THREAD_STATE_EXITED;
904 			thread->sth_did = 0;
905 			thread->sth_kill = B_FALSE;
906 			cv_broadcast(&thread->sth_cv);
907 			break;
908 		}
909 		/*FALLTHRU*/
910 
911 	case SMB_THREAD_STATE_EXITING:
912 		if (thread->sth_kill) {
913 			while (thread->sth_state != SMB_THREAD_STATE_EXITED)
914 				cv_wait(&thread->sth_cv, &thread->sth_mtx);
915 		} else {
916 			thread->sth_state = SMB_THREAD_STATE_EXITED;
917 			thread->sth_did = 0;
918 		}
919 		break;
920 
921 	case SMB_THREAD_STATE_EXITED:
922 		break;
923 
924 	default:
925 		ASSERT(0);
926 		break;
927 	}
928 	mutex_exit(&thread->sth_mtx);
929 }
930 
931 /*
932  * smb_thread_signal
933  *
934  * This function signals a thread.
935  */
936 void
937 smb_thread_signal(smb_thread_t *thread)
938 {
939 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
940 
941 	mutex_enter(&thread->sth_mtx);
942 	switch (thread->sth_state) {
943 	case SMB_THREAD_STATE_RUNNING:
944 		cv_signal(&thread->sth_cv);
945 		break;
946 
947 	default:
948 		break;
949 	}
950 	mutex_exit(&thread->sth_mtx);
951 }
952 
953 boolean_t
954 smb_thread_continue(smb_thread_t *thread)
955 {
956 	boolean_t result;
957 
958 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
959 
960 	mutex_enter(&thread->sth_mtx);
961 	result = smb_thread_continue_timedwait_locked(thread, 0);
962 	mutex_exit(&thread->sth_mtx);
963 
964 	return (result);
965 }
966 
967 boolean_t
968 smb_thread_continue_nowait(smb_thread_t *thread)
969 {
970 	boolean_t result;
971 
972 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
973 
974 	mutex_enter(&thread->sth_mtx);
975 	/*
976 	 * Setting ticks=-1 requests a non-blocking check.  We will
977 	 * still block if the thread is in "suspend" state.
978 	 */
979 	result = smb_thread_continue_timedwait_locked(thread, -1);
980 	mutex_exit(&thread->sth_mtx);
981 
982 	return (result);
983 }
984 
985 boolean_t
986 smb_thread_continue_timedwait(smb_thread_t *thread, int seconds)
987 {
988 	boolean_t result;
989 
990 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
991 
992 	mutex_enter(&thread->sth_mtx);
993 	result = smb_thread_continue_timedwait_locked(thread,
994 	    SEC_TO_TICK(seconds));
995 	mutex_exit(&thread->sth_mtx);
996 
997 	return (result);
998 }
999 
1000 /*
1001  * smb_thread_continue_timedwait_locked
1002  *
1003  * Internal only.  ticks == -1 means don't block; ticks == 0 means wait
1004  * indefinitely.
1005  */
1006 static boolean_t
1007 smb_thread_continue_timedwait_locked(smb_thread_t *thread, int ticks)
1008 {
1009 	boolean_t	result;
1010 
1011 	/* -1 means don't block */
1012 	if (ticks != -1 && !thread->sth_kill) {
1013 		if (ticks == 0) {
1014 			cv_wait(&thread->sth_cv, &thread->sth_mtx);
1015 		} else {
1016 			(void) cv_reltimedwait(&thread->sth_cv,
1017 			    &thread->sth_mtx, (clock_t)ticks, TR_CLOCK_TICK);
1018 		}
1019 	}
1020 	result = (thread->sth_kill == 0);
1021 
1022 	return (result);
1023 }
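
/*
 * Illustrative sketch, not part of the build: the intended life cycle
 * of an smb_thread_t.  The worker loops in smb_thread_continue(),
 * which returns B_FALSE once smb_thread_stop() sets sth_kill.  The
 * names below and the SMB_KUTIL_EXAMPLE guard are hypothetical.
 */
#ifdef SMB_KUTIL_EXAMPLE
static void
smb_example_worker(smb_thread_t *thread, void *arg)
{
	while (smb_thread_continue(thread)) {
		/* woken by smb_thread_signal(); do one unit of work */
	}
}

static void
smb_thread_example(void)
{
	smb_thread_t	thread;

	smb_thread_init(&thread, "smb_example", smb_example_worker, NULL);
	(void) smb_thread_start(&thread);
	smb_thread_signal(&thread);		/* kick the worker */
	smb_thread_stop(&thread);		/* waits for exit */
	smb_thread_destroy(&thread);
}
#endif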
1024 
1025 /*
1026  * smb_rwx_init
1027  */
1028 void
1029 smb_rwx_init(
1030     smb_rwx_t	*rwx)
1031 {
1032 	bzero(rwx, sizeof (smb_rwx_t));
1033 	cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
1034 	mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
1035 	rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
1036 }
1037 
1038 /*
1039  * smb_rwx_destroy
1040  */
1041 void
1042 smb_rwx_destroy(
1043     smb_rwx_t	*rwx)
1044 {
1045 	mutex_destroy(&rwx->rwx_mutex);
1046 	cv_destroy(&rwx->rwx_cv);
1047 	rw_destroy(&rwx->rwx_lock);
1048 }
1049 
1050 /*
1051  * smb_rwx_rwexit
1052  */
1053 void
1054 smb_rwx_rwexit(
1055     smb_rwx_t	*rwx)
1056 {
1057 	if (rw_write_held(&rwx->rwx_lock)) {
1058 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1059 		mutex_enter(&rwx->rwx_mutex);
1060 		if (rwx->rwx_waiting) {
1061 			rwx->rwx_waiting = B_FALSE;
1062 			cv_broadcast(&rwx->rwx_cv);
1063 		}
1064 		mutex_exit(&rwx->rwx_mutex);
1065 	}
1066 	rw_exit(&rwx->rwx_lock);
1067 }
1068 
1069 /*
1070  * smb_rwx_rwupgrade
1071  */
1072 krw_t
1073 smb_rwx_rwupgrade(
1074     smb_rwx_t	*rwx)
1075 {
1076 	if (rw_write_held(&rwx->rwx_lock)) {
1077 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1078 		return (RW_WRITER);
1079 	}
1080 	if (!rw_tryupgrade(&rwx->rwx_lock)) {
1081 		rw_exit(&rwx->rwx_lock);
1082 		rw_enter(&rwx->rwx_lock, RW_WRITER);
1083 	}
1084 	return (RW_READER);
1085 }
1086 
1087 /*
1088  * smb_rwx_rwdowngrade
1089  */
1090 void
1091 smb_rwx_rwdowngrade(
1092     smb_rwx_t	*rwx,
1093     krw_t	mode)
1094 {
1095 	ASSERT(rw_write_held(&rwx->rwx_lock));
1096 	ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1097 
1098 	if (mode == RW_WRITER) {
1099 		return;
1100 	}
1101 	ASSERT(mode == RW_READER);
1102 	mutex_enter(&rwx->rwx_mutex);
1103 	if (rwx->rwx_waiting) {
1104 		rwx->rwx_waiting = B_FALSE;
1105 		cv_broadcast(&rwx->rwx_cv);
1106 	}
1107 	mutex_exit(&rwx->rwx_mutex);
1108 	rw_downgrade(&rwx->rwx_lock);
1109 }
1110 
1111 /*
1112  * smb_rwx_wait
1113  *
1114  * This function assumes the smb_rwx lock was entered in RW_READER or RW_WRITER
1115  * mode. It will:
1116  *
1117  *	1) release the lock and save its current mode.
1118  *	2) wait until the condition variable is signaled. This can happen for
1119  *	   two reasons: when a writer releases the lock or when the timeout (if
1120  *	   provided) expires.
1121  *	3) re-acquire the lock in the mode saved in (1).
1122  */
1123 int
1124 smb_rwx_rwwait(
1125     smb_rwx_t	*rwx,
1126     clock_t	timeout)
1127 {
1128 	int	rc = 1;		/* default if the wait below is skipped */
1129 	krw_t	mode;
1130 
1131 	mutex_enter(&rwx->rwx_mutex);
1132 	rwx->rwx_waiting = B_TRUE;
1133 	mutex_exit(&rwx->rwx_mutex);
1134 
1135 	if (rw_write_held(&rwx->rwx_lock)) {
1136 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1137 		mode = RW_WRITER;
1138 	} else {
1139 		ASSERT(rw_read_held(&rwx->rwx_lock));
1140 		mode = RW_READER;
1141 	}
1142 	rw_exit(&rwx->rwx_lock);
1143 
1144 	mutex_enter(&rwx->rwx_mutex);
1145 	if (rwx->rwx_waiting) {
1146 		if (timeout == -1) {
1147 			rc = 1;
1148 			cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
1149 		} else {
1150 			rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
1151 			    timeout, TR_CLOCK_TICK);
1152 		}
1153 	}
1154 	mutex_exit(&rwx->rwx_mutex);
1155 
1156 	rw_enter(&rwx->rwx_lock, mode);
1157 	return (rc);
1158 }
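
/*
 * Illustrative sketch, not part of the build: a hypothetical reader
 * (guarded by SMB_KUTIL_EXAMPLE) waiting for a condition that only a
 * writer can change.
 */
#ifdef SMB_KUTIL_EXAMPLE
static void
smb_rwx_rwwait_example(smb_rwx_t *rwx, boolean_t *condition)
{
	rw_enter(&rwx->rwx_lock, RW_READER);
	while (!*condition) {
		/*
		 * Drops rwx_lock, sleeps until a writer exits or one
		 * second elapses, then re-enters the lock in the
		 * original (reader) mode.
		 */
		(void) smb_rwx_rwwait(rwx, SEC_TO_TICK(1));
	}
	smb_rwx_rwexit(rwx);
}
#endif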
1159 
1160 /*
1161  * SMB ID mapping
1162  *
1163  * Solaris ID mapping service (aka Winchester) works with domain SIDs
1164  * and RIDs where domain SIDs are in string format. CIFS service works
1165  * with binary SIDs understandable by CIFS clients. A layer of SMB ID
1166  * mapping functions is implemented to hide the SID conversion details
1167  * and also hide the handling of arrays of batch mapping requests.
1168  *
1169  * IMPORTANT NOTE: The Winchester API requires a zone. Because the CIFS
1170  * server currently runs only in the global zone, the global zone is
1171  * specified. This needs to be fixed when the CIFS server supports zones.
1172  */
1173 
1174 static int smb_idmap_batch_binsid(smb_idmap_batch_t *sib);
1175 
1176 /*
1177  * smb_idmap_getid
1178  *
1179  * Maps the given Windows SID to a Solaris ID using the
1180  * simple mapping API.
1181  */
1182 idmap_stat
1183 smb_idmap_getid(smb_sid_t *sid, uid_t *id, int *idtype)
1184 {
1185 	smb_idmap_t sim;
1186 	char sidstr[SMB_SID_STRSZ];
1187 
1188 	smb_sid_tostr(sid, sidstr);
1189 	if (smb_sid_splitstr(sidstr, &sim.sim_rid) != 0)
1190 		return (IDMAP_ERR_SID);
1191 	sim.sim_domsid = sidstr;
1192 	sim.sim_id = id;
1193 
1194 	switch (*idtype) {
1195 	case SMB_IDMAP_USER:
1196 		sim.sim_stat = kidmap_getuidbysid(global_zone, sim.sim_domsid,
1197 		    sim.sim_rid, sim.sim_id);
1198 		break;
1199 
1200 	case SMB_IDMAP_GROUP:
1201 		sim.sim_stat = kidmap_getgidbysid(global_zone, sim.sim_domsid,
1202 		    sim.sim_rid, sim.sim_id);
1203 		break;
1204 
1205 	case SMB_IDMAP_UNKNOWN:
1206 		sim.sim_stat = kidmap_getpidbysid(global_zone, sim.sim_domsid,
1207 		    sim.sim_rid, sim.sim_id, &sim.sim_idtype);
1208 		break;
1209 
1210 	default:
1211 		ASSERT(0);
1212 		return (IDMAP_ERR_ARG);
1213 	}
1214 
1215 	*idtype = sim.sim_idtype;
1216 
1217 	return (sim.sim_stat);
1218 }
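
/*
 * Illustrative sketch, not part of the build: mapping a SID whose type
 * is not known in advance.  The example function and SMB_KUTIL_EXAMPLE
 * guard are hypothetical.
 */
#ifdef SMB_KUTIL_EXAMPLE
static void
smb_idmap_getid_example(smb_sid_t *sid)
{
	uid_t	id;
	int	idtype = SMB_IDMAP_UNKNOWN;

	if (smb_idmap_getid(sid, &id, &idtype) == IDMAP_SUCCESS) {
		/* idtype now says whether id is a UID or a GID */
	}
}
#endif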
1219 
1220 /*
1221  * smb_idmap_getsid
1222  *
1223  * Maps the given Solaris ID to a Windows SID using the
1224  * simple mapping API.
1225  */
1226 idmap_stat
1227 smb_idmap_getsid(uid_t id, int idtype, smb_sid_t **sid)
1228 {
1229 	smb_idmap_t sim;
1230 
1231 	switch (idtype) {
1232 	case SMB_IDMAP_USER:
1233 		sim.sim_stat = kidmap_getsidbyuid(global_zone, id,
1234 		    (const char **)&sim.sim_domsid, &sim.sim_rid);
1235 		break;
1236 
1237 	case SMB_IDMAP_GROUP:
1238 		sim.sim_stat = kidmap_getsidbygid(global_zone, id,
1239 		    (const char **)&sim.sim_domsid, &sim.sim_rid);
1240 		break;
1241 
1242 	case SMB_IDMAP_EVERYONE:
1243 		/* Everyone S-1-1-0 */
1244 		sim.sim_domsid = "S-1-1";
1245 		sim.sim_rid = 0;
1246 		sim.sim_stat = IDMAP_SUCCESS;
1247 		break;
1248 
1249 	default:
1250 		ASSERT(0);
1251 		return (IDMAP_ERR_ARG);
1252 	}
1253 
1254 	if (sim.sim_stat != IDMAP_SUCCESS)
1255 		return (sim.sim_stat);
1256 
1257 	if (sim.sim_domsid == NULL)
1258 		return (IDMAP_ERR_NOMAPPING);
1259 
1260 	sim.sim_sid = smb_sid_fromstr(sim.sim_domsid);
1261 	if (sim.sim_sid == NULL)
1262 		return (IDMAP_ERR_INTERNAL);
1263 
1264 	*sid = smb_sid_splice(sim.sim_sid, sim.sim_rid);
1265 	smb_sid_free(sim.sim_sid);
1266 	if (*sid == NULL)
1267 		sim.sim_stat = IDMAP_ERR_INTERNAL;
1268 
1269 	return (sim.sim_stat);
1270 }
1271 
1272 /*
1273  * smb_idmap_batch_create
1274  *
1275  * Creates and initializes the context for batch ID mapping.
1276  */
1277 idmap_stat
1278 smb_idmap_batch_create(smb_idmap_batch_t *sib, uint16_t nmap, int flags)
1279 {
1280 	ASSERT(sib);
1281 
1282 	bzero(sib, sizeof (smb_idmap_batch_t));
1283 
1284 	sib->sib_idmaph = kidmap_get_create(global_zone);
1285 
1286 	sib->sib_flags = flags;
1287 	sib->sib_nmap = nmap;
1288 	sib->sib_size = nmap * sizeof (smb_idmap_t);
1289 	sib->sib_maps = kmem_zalloc(sib->sib_size, KM_SLEEP);
1290 
1291 	return (IDMAP_SUCCESS);
1292 }
1293 
1294 /*
1295  * smb_idmap_batch_destroy
1296  *
1297  * Frees the batch ID mapping context.
1298  * If ID mapping is Solaris -> Windows it frees memories
1299  * allocated for binary SIDs.
1300  */
1301 void
1302 smb_idmap_batch_destroy(smb_idmap_batch_t *sib)
1303 {
1304 	char *domsid;
1305 	int i;
1306 
1307 	ASSERT(sib);
1308 	ASSERT(sib->sib_maps);
1309 
1310 	if (sib->sib_idmaph)
1311 		kidmap_get_destroy(sib->sib_idmaph);
1312 
1313 	if (sib->sib_flags & SMB_IDMAP_ID2SID) {
1314 		/*
1315 		 * SIDs are allocated only when mapping
1316 		 * UID/GID to SIDs
1317 		 */
1318 		for (i = 0; i < sib->sib_nmap; i++)
1319 			smb_sid_free(sib->sib_maps[i].sim_sid);
1320 	} else if (sib->sib_flags & SMB_IDMAP_SID2ID) {
1321 		/*
1322 		 * SID prefixes are allocated only when mapping
1323 		 * SIDs to UID/GID
1324 		 */
1325 		for (i = 0; i < sib->sib_nmap; i++) {
1326 			domsid = sib->sib_maps[i].sim_domsid;
1327 			if (domsid)
1328 				smb_mem_free(domsid);
1329 		}
1330 	}
1331 
1332 	if (sib->sib_size && sib->sib_maps)
1333 		kmem_free(sib->sib_maps, sib->sib_size);
1334 }
1335 
1336 /*
1337  * smb_idmap_batch_getid
1338  *
1339  * Queue a request to map the given SID to a UID or GID.
1340  *
1341  * sim->sim_id should point to the variable that will hold the
1342  * returned UID/GID. This must be set up by the caller of this
1343  * function.
1344  *
1345  * If the requested ID type is known, it's passed as 'idtype';
1346  * if it's unknown, it'll be returned in sim->sim_idtype.
1347  */
1348 idmap_stat
1349 smb_idmap_batch_getid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
1350     smb_sid_t *sid, int idtype)
1351 {
1352 	char strsid[SMB_SID_STRSZ];
1353 	idmap_stat idm_stat;
1354 
1355 	ASSERT(idmaph);
1356 	ASSERT(sim);
1357 	ASSERT(sid);
1358 
1359 	smb_sid_tostr(sid, strsid);
1360 	if (smb_sid_splitstr(strsid, &sim->sim_rid) != 0)
1361 		return (IDMAP_ERR_SID);
1362 	sim->sim_domsid = smb_mem_strdup(strsid);
1363 
1364 	switch (idtype) {
1365 	case SMB_IDMAP_USER:
1366 		idm_stat = kidmap_batch_getuidbysid(idmaph, sim->sim_domsid,
1367 		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
1368 		break;
1369 
1370 	case SMB_IDMAP_GROUP:
1371 		idm_stat = kidmap_batch_getgidbysid(idmaph, sim->sim_domsid,
1372 		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
1373 		break;
1374 
1375 	case SMB_IDMAP_UNKNOWN:
1376 		idm_stat = kidmap_batch_getpidbysid(idmaph, sim->sim_domsid,
1377 		    sim->sim_rid, sim->sim_id, &sim->sim_idtype,
1378 		    &sim->sim_stat);
1379 		break;
1380 
1381 	default:
1382 		ASSERT(0);
1383 		return (IDMAP_ERR_ARG);
1384 	}
1385 
1386 	return (idm_stat);
1387 }
1388 
1389 /*
1390  * smb_idmap_batch_getsid
1391  *
1392  * Queue a request to map the given UID/GID to a SID.
1393  *
1394  * sim->sim_domsid and sim->sim_rid will contain the mapping
1395  * result upon successful processing of the batched request.
1396  */
1397 idmap_stat
1398 smb_idmap_batch_getsid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
1399     uid_t id, int idtype)
1400 {
1401 	idmap_stat idm_stat;
1402 
1403 	switch (idtype) {
1404 	case SMB_IDMAP_USER:
1405 		idm_stat = kidmap_batch_getsidbyuid(idmaph, id,
1406 		    (const char **)&sim->sim_domsid, &sim->sim_rid,
1407 		    &sim->sim_stat);
1408 		break;
1409 
1410 	case SMB_IDMAP_GROUP:
1411 		idm_stat = kidmap_batch_getsidbygid(idmaph, id,
1412 		    (const char **)&sim->sim_domsid, &sim->sim_rid,
1413 		    &sim->sim_stat);
1414 		break;
1415 
1416 	case SMB_IDMAP_OWNERAT:
1417 		/* Current Owner S-1-5-32-766 */
1418 		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
1419 		sim->sim_rid = SECURITY_CURRENT_OWNER_RID;
1420 		sim->sim_stat = IDMAP_SUCCESS;
1421 		idm_stat = IDMAP_SUCCESS;
1422 		break;
1423 
1424 	case SMB_IDMAP_GROUPAT:
1425 		/* Current Group S-1-5-32-767 */
1426 		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
1427 		sim->sim_rid = SECURITY_CURRENT_GROUP_RID;
1428 		sim->sim_stat = IDMAP_SUCCESS;
1429 		idm_stat = IDMAP_SUCCESS;
1430 		break;
1431 
1432 	case SMB_IDMAP_EVERYONE:
1433 		/* Everyone S-1-1-0 */
1434 		sim->sim_domsid = NT_WORLD_AUTH_SIDSTR;
1435 		sim->sim_rid = 0;
1436 		sim->sim_stat = IDMAP_SUCCESS;
1437 		idm_stat = IDMAP_SUCCESS;
1438 		break;
1439 
1440 	default:
1441 		ASSERT(0);
1442 		return (IDMAP_ERR_ARG);
1443 	}
1444 
1445 	return (idm_stat);
1446 }
1447 
1448 /*
1449  * smb_idmap_batch_binsid
1450  *
1451  * Convert (domsid, rid) pairs to binary SIDs.
1452  *
1453  * Returns 0 if successful and non-zero upon failure.
1454  */
1455 static int
1456 smb_idmap_batch_binsid(smb_idmap_batch_t *sib)
1457 {
1458 	smb_sid_t *sid;
1459 	smb_idmap_t *sim;
1460 	int i;
1461 
1462 	if (sib->sib_flags & SMB_IDMAP_SID2ID)
1463 		/* This operation is not required */
1464 		return (0);
1465 
1466 	sim = sib->sib_maps;
1467 	for (i = 0; i < sib->sib_nmap; sim++, i++) {
1468 		ASSERT(sim->sim_domsid);
1469 		if (sim->sim_domsid == NULL)
1470 			return (1);
1471 
1472 		if ((sid = smb_sid_fromstr(sim->sim_domsid)) == NULL)
1473 			return (1);
1474 
1475 		sim->sim_sid = smb_sid_splice(sid, sim->sim_rid);
1476 		smb_sid_free(sid);
1477 	}
1478 
1479 	return (0);
1480 }
1481 
1482 /*
1483  * smb_idmap_batch_getmappings
1484  *
1485  * Triggers the ID mapping service to get the mappings for the
1486  * queued requests.
1487  *
1488  * Checks the result of all the queued requests.
1489  * If this is a Solaris -> Windows mapping it generates
1490  * binary SIDs from returned (domsid, rid) pairs.
1491  */
1492 idmap_stat
1493 smb_idmap_batch_getmappings(smb_idmap_batch_t *sib)
1494 {
1495 	idmap_stat idm_stat = IDMAP_SUCCESS;
1496 	int i;
1497 
1498 	idm_stat = kidmap_get_mappings(sib->sib_idmaph);
1499 	if (idm_stat != IDMAP_SUCCESS)
1500 		return (idm_stat);
1501 
1502 	/*
1503 	 * Check the status for all the queued requests
1504 	 */
1505 	for (i = 0; i < sib->sib_nmap; i++) {
1506 		if (sib->sib_maps[i].sim_stat != IDMAP_SUCCESS)
1507 			return (sib->sib_maps[i].sim_stat);
1508 	}
1509 
1510 	if (smb_idmap_batch_binsid(sib) != 0)
1511 		idm_stat = IDMAP_ERR_OTHER;
1512 
1513 	return (idm_stat);
1514 }
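
/*
 * Illustrative sketch, not part of the build: the batch API queues
 * several requests against one handle and resolves them in a single
 * round trip to the idmap service.  The example function and the
 * SMB_KUTIL_EXAMPLE guard are hypothetical.
 */
#ifdef SMB_KUTIL_EXAMPLE
static idmap_stat
smb_idmap_batch_example(smb_sid_t *usid, smb_sid_t *gsid,
    uid_t *uid, gid_t *gid)
{
	smb_idmap_batch_t	sib;
	idmap_stat		stat;

	stat = smb_idmap_batch_create(&sib, 2, SMB_IDMAP_SID2ID);
	if (stat != IDMAP_SUCCESS)
		return (stat);

	sib.sib_maps[0].sim_id = uid;
	stat = smb_idmap_batch_getid(sib.sib_idmaph, &sib.sib_maps[0],
	    usid, SMB_IDMAP_USER);

	if (stat == IDMAP_SUCCESS) {
		sib.sib_maps[1].sim_id = (uid_t *)gid;
		stat = smb_idmap_batch_getid(sib.sib_idmaph,
		    &sib.sib_maps[1], gsid, SMB_IDMAP_GROUP);
	}

	if (stat == IDMAP_SUCCESS)
		stat = smb_idmap_batch_getmappings(&sib);

	smb_idmap_batch_destroy(&sib);
	return (stat);
}
#endif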
1515 
1516 uint64_t
1517 smb_time_unix_to_nt(timestruc_t *unix_time)
1518 {
1519 	uint64_t nt_time;
1520 
1521 	if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
1522 		return (0);
1523 
1524 	nt_time = unix_time->tv_sec;
1525 	nt_time *= 10000000;  /* seconds to 100ns */
1526 	nt_time += unix_time->tv_nsec / 100;
1527 	return (nt_time + NT_TIME_BIAS);
1528 }
1529 
1530 void
1531 smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
1532 {
1533 	uint32_t seconds;
1534 
1535 	ASSERT(unix_time);
1536 
1537 	if ((nt_time == 0) || (nt_time == -1)) {
1538 		unix_time->tv_sec = 0;
1539 		unix_time->tv_nsec = 0;
1540 		return;
1541 	}
1542 
1543 	nt_time -= NT_TIME_BIAS;
1544 	seconds = nt_time / 10000000;
1545 	unix_time->tv_sec = seconds;
1546 	unix_time->tv_nsec = (nt_time  % 10000000) * 100;
1547 }
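
/*
 * Illustrative sketch, not part of the build: a round trip through the
 * two conversions above.  NT time counts 100ns units since 1601, so
 * NT_TIME_BIAS is the 1601-to-1970 offset in 100ns units.  The example
 * function and SMB_KUTIL_EXAMPLE guard are hypothetical.
 */
#ifdef SMB_KUTIL_EXAMPLE
static void
smb_time_nt_example(void)
{
	timestruc_t	in, out;
	uint64_t	nt;

	in.tv_sec = 1;			/* 00:00:01 UTC, Jan 1 1970 */
	in.tv_nsec = 500;
	nt = smb_time_unix_to_nt(&in);	/* NT_TIME_BIAS + 10000005 */
	smb_time_nt_to_unix(nt, &out);	/* tv_sec == 1, tv_nsec == 500 */
}
#endif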
1548 
1549 /*
1550  * smb_time_gmt_to_local, smb_time_local_to_gmt
1551  *
1552  * Apply the gmt offset to convert between local time and gmt
1553  */
1554 int32_t
1555 smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt)
1556 {
1557 	if ((gmt == 0) || (gmt == -1))
1558 		return (0);
1559 
1560 	return (gmt - sr->sr_gmtoff);
1561 }
1562 
1563 int32_t
1564 smb_time_local_to_gmt(smb_request_t *sr, int32_t local)
1565 {
1566 	if ((local == 0) || (local == -1))
1567 		return (0);
1568 
1569 	return (local + sr->sr_gmtoff);
1570 }
1571 
1572 
1573 /*
1574  * smb_time_dos_to_unix
1575  *
1576  * Convert SMB_DATE & SMB_TIME values to a unix timestamp.
1577  *
1578  * A date/time field of 0 means that the server file system
1579  * assigned value need not be changed. The behaviour when the
1580  * date/time field is set to -1 is not documented but is
1581  * generally treated like 0.
1582  * If date or time is 0 or -1 the unix time is returned as 0
1583  * so that the caller can identify and handle this special case.
1584  */
1585 int32_t
1586 smb_time_dos_to_unix(int16_t date, int16_t time)
1587 {
1588 	struct tm	atm;
1589 
1590 	if (((date == 0) || (time == 0)) ||
1591 	    ((date == -1) || (time == -1))) {
1592 		return (0);
1593 	}
1594 
1595 	atm.tm_year = ((date >>  9) & 0x3F) + 80;
1596 	atm.tm_mon  = ((date >>  5) & 0x0F) - 1;
1597 	atm.tm_mday = ((date >>  0) & 0x1F);
1598 	atm.tm_hour = ((time >> 11) & 0x1F);
1599 	atm.tm_min  = ((time >>  5) & 0x3F);
1600 	atm.tm_sec  = ((time >>  0) & 0x1F) << 1;
1601 
1602 	return (smb_timegm(&atm));
1603 }
1604 
1605 void
1606 smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p)
1607 {
1608 	struct tm	atm;
1609 	int		i;
1610 	time_t		tmp_time;
1611 
1612 	if (ux_time == 0) {
1613 		*date_p = 0;
1614 		*time_p = 0;
1615 		return;
1616 	}
1617 
1618 	tmp_time = (time_t)ux_time;
1619 	(void) smb_gmtime_r(&tmp_time, &atm);
1620 
1621 	if (date_p) {
1622 		i = 0;
1623 		i += atm.tm_year - 80;
1624 		i <<= 4;
1625 		i += atm.tm_mon + 1;
1626 		i <<= 5;
1627 		i += atm.tm_mday;
1628 
1629 		*date_p = (short)i;
1630 	}
1631 	if (time_p) {
1632 		i = 0;
1633 		i += atm.tm_hour;
1634 		i <<= 6;
1635 		i += atm.tm_min;
1636 		i <<= 5;
1637 		i += atm.tm_sec >> 1;
1638 
1639 		*time_p = (short)i;
1640 	}
1641 }
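
/*
 * Worked example of the DOS encoding produced above, using the
 * hypothetical local time 1998-11-22 10:30:00:
 *
 *	date = ((1998 - 1980) << 9) | (11 << 5) | 22 = 0x2576
 *	time = (10 << 11) | (30 << 5) | (0 >> 1)     = 0x53c0
 *
 * Seconds are stored in two-second units, so odd seconds are lost in
 * the conversion.
 */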
1642 
1643 
1644 /*
1645  * smb_gmtime_r
1646  *
1647  * Thread-safe version of smb_gmtime. Returns a null pointer if either
1648  * input parameter is a null pointer. Otherwise returns a pointer
1649  * to result.
1650  *
1651  * Day of the week calculation: the Epoch was a Thursday.
1652  *
1653  * There are no timezone corrections so tm_isdst and tm_gmtoff are
1654  * always zero, and the zone is always WET.
1655  */
1656 struct tm *
1657 smb_gmtime_r(time_t *clock, struct tm *result)
1658 {
1659 	time_t tsec;
1660 	int year;
1661 	int month;
1662 	int sec_per_month;
1663 
1664 	if (clock == 0 || result == 0)
1665 		return (0);
1666 
1667 	bzero(result, sizeof (struct tm));
1668 	tsec = *clock;
1669 	tsec -= tzh_leapcnt;
1670 
1671 	result->tm_wday = tsec / SECSPERDAY;
1672 	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;
1673 
1674 	year = EPOCH_YEAR;
1675 	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
1676 	    (SECSPERDAY * DAYSPERNYEAR))) {
1677 		if (isleap(year))
1678 			tsec -= SECSPERDAY * DAYSPERLYEAR;
1679 		else
1680 			tsec -= SECSPERDAY * DAYSPERNYEAR;
1681 
1682 		++year;
1683 	}
1684 
1685 	result->tm_year = year - TM_YEAR_BASE;
1686 	result->tm_yday = tsec / SECSPERDAY;
1687 
1688 	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
1689 		sec_per_month = days_in_month[month] * SECSPERDAY;
1690 
1691 		if (month == TM_FEBRUARY && isleap(year))
1692 			sec_per_month += SECSPERDAY;
1693 
1694 		if (tsec < sec_per_month)
1695 			break;
1696 
1697 		tsec -= sec_per_month;
1698 	}
1699 
1700 	result->tm_mon = month;
1701 	result->tm_mday = (tsec / SECSPERDAY) + 1;
1702 	tsec %= SECSPERDAY;
1703 	result->tm_sec = tsec % 60;
1704 	tsec /= 60;
1705 	result->tm_min = tsec % 60;
1706 	tsec /= 60;
1707 	result->tm_hour = (int)tsec;
1708 
1709 	return (result);
1710 }
1711 
1712 
1713 /*
1714  * smb_timegm
1715  *
1716  * Converts the broken-down time in tm to a time value, i.e. the number
1717  * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
1718  * not a POSIX or ANSI function. Per the man page, the input values of
1719  * tm_wday and tm_yday are ignored and, as the input data is assumed to
1720  * represent GMT, we force tm_isdst and tm_gmtoff to 0.
1721  *
1722  * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
1723  * and tm_yday, and bring the other fields within normal range. I don't
1724  * think this is really how it should be done but it's convenient for
1725  * now.
1726  */
1727 time_t
1728 smb_timegm(struct tm *tm)
1729 {
1730 	time_t tsec;
1731 	int dd;
1732 	int mm;
1733 	int yy;
1734 	int year;
1735 
1736 	if (tm == 0)
1737 		return (-1);
1738 
1739 	year = tm->tm_year + TM_YEAR_BASE;
1740 	tsec = tzh_leapcnt;
1741 
1742 	for (yy = EPOCH_YEAR; yy < year; ++yy) {
1743 		if (isleap(yy))
1744 			tsec += SECSPERDAY * DAYSPERLYEAR;
1745 		else
1746 			tsec += SECSPERDAY * DAYSPERNYEAR;
1747 	}
1748 
1749 	for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
1750 		dd = days_in_month[mm] * SECSPERDAY;
1751 
1752 		if (mm == TM_FEBRUARY && isleap(year))
1753 			dd += SECSPERDAY;
1754 
1755 		tsec += dd;
1756 	}
1757 
1758 	tsec += (tm->tm_mday - 1) * SECSPERDAY;
1759 	tsec += tm->tm_sec;
1760 	tsec += tm->tm_min * SECSPERMIN;
1761 	tsec += tm->tm_hour * SECSPERHOUR;
1762 
1763 	tm->tm_isdst = 0;
1764 	(void) smb_gmtime_r(&tsec, tm);
1765 	return (tsec);
1766 }
1767 
1768 /*
1769  * smb_pad_align
1770  *
1771  * Returns the number of bytes required to pad an offset to the
1772  * specified alignment.
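 *
 * For example, smb_pad_align(3, 4) returns 1, while an already aligned
 * offset such as smb_pad_align(8, 4) returns 0.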
1773  */
1774 uint32_t
1775 smb_pad_align(uint32_t offset, uint32_t align)
1776 {
1777 	uint32_t pad = offset % align;
1778 
1779 	if (pad != 0)
1780 		pad = align - pad;
1781 
1782 	return (pad);
1783 }
1784 
1785 /*
1786  * smb_panic
1787  *
1788  * Logs the file name, function name and line number passed in and panics the
1789  * system.
1790  */
1791 void
1792 smb_panic(char *file, const char *func, int line)
1793 {
1794 	cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
1795 }
1796 
1797 /*
1798  * Creates an AVL tree and initializes the given smb_avl_t
1799  * structure using the passed args
1800  */
1801 void
1802 smb_avl_create(smb_avl_t *avl, size_t size, size_t offset, smb_avl_nops_t *ops)
1803 {
1804 	ASSERT(avl);
1805 	ASSERT(ops);
1806 
1807 	rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL);
1808 	mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL);
1809 
1810 	avl->avl_nops = ops;
1811 	avl->avl_state = SMB_AVL_STATE_READY;
1812 	avl->avl_refcnt = 0;
1813 	(void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence,
1814 	    sizeof (uint32_t));
1815 
1816 	avl_create(&avl->avl_tree, ops->avln_cmp, size, offset);
1817 }
1818 
1819 /*
1820  * Destroys the specified AVL tree.
1821  * It waits for all the in-flight operations to finish
1822  * before destroying the AVL.
1823  */
1824 void
1825 smb_avl_destroy(smb_avl_t *avl)
1826 {
1827 	void *cookie = NULL;
1828 	void *node;
1829 
1830 	ASSERT(avl);
1831 
1832 	mutex_enter(&avl->avl_mutex);
1833 	if (avl->avl_state != SMB_AVL_STATE_READY) {
1834 		mutex_exit(&avl->avl_mutex);
1835 		return;
1836 	}
1837 
1838 	avl->avl_state = SMB_AVL_STATE_DESTROYING;
1839 
1840 	while (avl->avl_refcnt > 0)
1841 		(void) cv_wait(&avl->avl_cv, &avl->avl_mutex);
1842 	mutex_exit(&avl->avl_mutex);
1843 
1844 	rw_enter(&avl->avl_lock, RW_WRITER);
1845 	while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL)
1846 		avl->avl_nops->avln_destroy(node);
1847 
1848 	avl_destroy(&avl->avl_tree);
1849 	rw_exit(&avl->avl_lock);
1850 
1851 	rw_destroy(&avl->avl_lock);
1852 
1853 	mutex_destroy(&avl->avl_mutex);
1854 	bzero(avl, sizeof (smb_avl_t));
1855 }
1856 
1857 /*
1858  * Adds the given item to the AVL if it's
1859  * not already there.
1860  *
1861  * Returns:
1862  *
1863  * 	ENOTACTIVE	AVL is not in READY state
1864  * 	EEXIST		The item is already in AVL
1865  */
1866 int
1867 smb_avl_add(smb_avl_t *avl, void *item)
1868 {
1869 	avl_index_t where;
1870 
1871 	ASSERT(avl);
1872 	ASSERT(item);
1873 
1874 	if (!smb_avl_hold(avl))
1875 		return (ENOTACTIVE);
1876 
1877 	rw_enter(&avl->avl_lock, RW_WRITER);
1878 	if (avl_find(&avl->avl_tree, item, &where) != NULL) {
1879 		rw_exit(&avl->avl_lock);
1880 		smb_avl_rele(avl);
1881 		return (EEXIST);
1882 	}
1883 
1884 	avl_insert(&avl->avl_tree, item, where);
1885 	avl->avl_sequence++;
1886 	rw_exit(&avl->avl_lock);
1887 
1888 	smb_avl_rele(avl);
1889 	return (0);
1890 }
1891 
1892 /*
1893  * Removes the given item from the AVL.
1894  * If no reference is left on the item
1895  * it will also be destroyed by calling the
1896  * registered destroy operation.
1897  */
1898 void
1899 smb_avl_remove(smb_avl_t *avl, void *item)
1900 {
1901 	avl_index_t where;
1902 	void *rm_item;
1903 
1904 	ASSERT(avl);
1905 	ASSERT(item);
1906 
1907 	if (!smb_avl_hold(avl))
1908 		return;
1909 
1910 	rw_enter(&avl->avl_lock, RW_WRITER);
1911 	if ((rm_item = avl_find(&avl->avl_tree, item, &where)) == NULL) {
1912 		rw_exit(&avl->avl_lock);
1913 		smb_avl_rele(avl);
1914 		return;
1915 	}
1916 
1917 	avl_remove(&avl->avl_tree, rm_item);
1918 	if (avl->avl_nops->avln_rele(rm_item))
1919 		avl->avl_nops->avln_destroy(rm_item);
1920 	avl->avl_sequence++;
1921 	rw_exit(&avl->avl_lock);
1922 
1923 	smb_avl_rele(avl);
1924 }
1925 
1926 /*
1927  * Looks up the AVL for the given item.
1928  * If the item is found a hold on the object
1929  * is taken before the pointer to it is
1930  * returned to the caller. The caller MUST
1931  * always call smb_avl_release() after it's done
1932  * using the returned object to release the hold
1933  * taken on the object.
1934  */
1935 void *
1936 smb_avl_lookup(smb_avl_t *avl, void *item)
1937 {
1938 	void *node = NULL;
1939 
1940 	ASSERT(avl);
1941 	ASSERT(item);
1942 
1943 	if (!smb_avl_hold(avl))
1944 		return (NULL);
1945 
1946 	rw_enter(&avl->avl_lock, RW_READER);
1947 	node = avl_find(&avl->avl_tree, item, NULL);
1948 	if (node != NULL)
1949 		avl->avl_nops->avln_hold(node);
1950 	rw_exit(&avl->avl_lock);
1951 
1952 	if (node == NULL)
1953 		smb_avl_rele(avl);
1954 
1955 	return (node);
1956 }
1957 
1958 /*
1959  * The hold on the given object is released.
1960  * This function MUST always be called after
1961  * smb_avl_lookup() and smb_avl_iterate() for
1962  * the returned object.
1963  *
1964  * If AVL is in DESTROYING state, the destroying
1965  * thread will be notified.
1966  */
1967 void
1968 smb_avl_release(smb_avl_t *avl, void *item)
1969 {
1970 	ASSERT(avl);
1971 	ASSERT(item);
1972 
1973 	if (avl->avl_nops->avln_rele(item))
1974 		avl->avl_nops->avln_destroy(item);
1975 
1976 	smb_avl_rele(avl);
1977 }
1978 
1979 /*
1980  * Initializes the given cursor for the AVL.
1981  * The cursor will be used to iterate through the AVL
1982  */
1983 void
1984 smb_avl_iterinit(smb_avl_t *avl, smb_avl_cursor_t *cursor)
1985 {
1986 	ASSERT(avl);
1987 	ASSERT(cursor);
1988 
1989 	cursor->avlc_next = NULL;
1990 	cursor->avlc_sequence = avl->avl_sequence;
1991 }
1992 
1993 /*
1994  * Iterates through the AVL using the given cursor.
1995  * It always starts at the beginning and then returns
1996  * a pointer to the next object on each subsequent call.
1997  *
1998  * If a new object is added to or removed from the AVL
1999  * between two calls to this function, the iteration
2000  * will terminate prematurely.
2001  *
2002  * The caller MUST always call smb_avl_release() after it's
2003  * done using the returned object to release the hold taken
2004  * on the object.
2005  */
2006 void *
2007 smb_avl_iterate(smb_avl_t *avl, smb_avl_cursor_t *cursor)
2008 {
2009 	void *node;
2010 
2011 	ASSERT(avl);
2012 	ASSERT(cursor);
2013 
2014 	if (!smb_avl_hold(avl))
2015 		return (NULL);
2016 
2017 	rw_enter(&avl->avl_lock, RW_READER);
2018 	if (cursor->avlc_sequence != avl->avl_sequence) {
2019 		rw_exit(&avl->avl_lock);
2020 		smb_avl_rele(avl);
2021 		return (NULL);
2022 	}
2023 
2024 	if (cursor->avlc_next == NULL)
2025 		node = avl_first(&avl->avl_tree);
2026 	else
2027 		node = AVL_NEXT(&avl->avl_tree, cursor->avlc_next);
2028 
2029 	if (node != NULL)
2030 		avl->avl_nops->avln_hold(node);
2031 
2032 	cursor->avlc_next = node;
2033 	rw_exit(&avl->avl_lock);
2034 
2035 	if (node == NULL)
2036 		smb_avl_rele(avl);
2037 
2038 	return (node);
2039 }
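
/*
 * Illustrative sketch, not part of the build: the required
 * iterate/release pairing.  The example function and SMB_KUTIL_EXAMPLE
 * guard are hypothetical.
 */
#ifdef SMB_KUTIL_EXAMPLE
static void
smb_avl_iterate_example(smb_avl_t *avl)
{
	smb_avl_cursor_t	cursor;
	void			*node;

	smb_avl_iterinit(avl, &cursor);
	while ((node = smb_avl_iterate(avl, &cursor)) != NULL) {
		/* ... use node ... */
		smb_avl_release(avl, node);	/* mandatory */
	}
}
#endif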
2040 
2041 /*
2042  * Increments the AVL reference count in order to
2043  * prevent the avl from being destroyed while it's
2044  * being accessed.
2045  */
2046 static boolean_t
2047 smb_avl_hold(smb_avl_t *avl)
2048 {
2049 	mutex_enter(&avl->avl_mutex);
2050 	if (avl->avl_state != SMB_AVL_STATE_READY) {
2051 		mutex_exit(&avl->avl_mutex);
2052 		return (B_FALSE);
2053 	}
2054 	avl->avl_refcnt++;
2055 	mutex_exit(&avl->avl_mutex);
2056 
2057 	return (B_TRUE);
2058 }
2059 
2060 /*
2061  * Decrements the AVL reference count to release the
2062  * hold. If another thread is trying to destroy the
2063  * AVL and is waiting for the reference count to become
2064  * 0, it is signaled to wake up.
2065  */
2066 static void
2067 smb_avl_rele(smb_avl_t *avl)
2068 {
2069 	mutex_enter(&avl->avl_mutex);
2070 	ASSERT(avl->avl_refcnt > 0);
2071 	avl->avl_refcnt--;
2072 	if (avl->avl_state == SMB_AVL_STATE_DESTROYING)
2073 		cv_broadcast(&avl->avl_cv);
2074 	mutex_exit(&avl->avl_mutex);
2075 }
2076 
2077 /*
2078  * smb_latency_init
2079  */
2080 void
2081 smb_latency_init(smb_latency_t *lat)
2082 {
2083 	bzero(lat, sizeof (*lat));
2084 	mutex_init(&lat->ly_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
2085 }
2086 
2087 /*
2088  * smb_latency_destroy
2089  */
2090 void
2091 smb_latency_destroy(smb_latency_t *lat)
2092 {
2093 	mutex_destroy(&lat->ly_mutex);
2094 }
2095 
2096 /*
2097  * smb_latency_add_sample
2098  *
2099  * Uses the new sample to calculate the new mean and standard deviation. The
2100  * sample must be a scaled value.
2101  */
2102 void
2103 smb_latency_add_sample(smb_latency_t *lat, hrtime_t sample)
2104 {
2105 	hrtime_t	a_mean;
2106 	hrtime_t	d_mean;
2107 
2108 	mutex_enter(&lat->ly_mutex);
2109 	lat->ly_a_nreq++;
2110 	lat->ly_a_sum += sample;
2111 	if (lat->ly_a_nreq != 0) {
2112 		a_mean = lat->ly_a_sum / lat->ly_a_nreq;
2113 		lat->ly_a_stddev =
2114 		    (sample - a_mean) * (sample - lat->ly_a_mean);
2115 		lat->ly_a_mean = a_mean;
2116 	}
2117 	lat->ly_d_nreq++;
2118 	lat->ly_d_sum += sample;
2119 	if (lat->ly_d_nreq != 0) {
2120 		d_mean = lat->ly_d_sum / lat->ly_d_nreq;
2121 		lat->ly_d_stddev =
2122 		    (sample - d_mean) * (sample - lat->ly_d_mean);
2123 		lat->ly_d_mean = d_mean;
2124 	}
2125 	mutex_exit(&lat->ly_mutex);
2126 }
2127 
2128 /*
2129  * smb_srqueue_init
2130  */
2131 void
2132 smb_srqueue_init(smb_srqueue_t *srq)
2133 {
2134 	bzero(srq, sizeof (*srq));
2135 	mutex_init(&srq->srq_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
2136 	srq->srq_wlastupdate = srq->srq_rlastupdate = gethrtime_unscaled();
2137 }
2138 
2139 /*
2140  * smb_srqueue_destroy
2141  */
2142 void
2143 smb_srqueue_destroy(smb_srqueue_t *srq)
2144 {
2145 	mutex_destroy(&srq->srq_mutex);
2146 }
2147 
2148 /*
2149  * smb_srqueue_waitq_enter
2150  */
2151 void
2152 smb_srqueue_waitq_enter(smb_srqueue_t *srq)
2153 {
2154 	hrtime_t	new;
2155 	hrtime_t	delta;
2156 	uint32_t	wcnt;
2157 
2158 	mutex_enter(&srq->srq_mutex);
2159 	new = gethrtime_unscaled();
2160 	delta = new - srq->srq_wlastupdate;
2161 	srq->srq_wlastupdate = new;
2162 	wcnt = srq->srq_wcnt++;
2163 	if (wcnt != 0) {
2164 		srq->srq_wlentime += delta * wcnt;
2165 		srq->srq_wtime += delta;
2166 	}
2167 	mutex_exit(&srq->srq_mutex);
2168 }
2169 
2170 /*
2171  * smb_srqueue_runq_exit
2172  */
2173 void
2174 smb_srqueue_runq_exit(smb_srqueue_t *srq)
2175 {
2176 	hrtime_t	new;
2177 	hrtime_t	delta;
2178 	uint32_t	rcnt;
2179 
2180 	mutex_enter(&srq->srq_mutex);
2181 	new = gethrtime_unscaled();
2182 	delta = new - srq->srq_rlastupdate;
2183 	srq->srq_rlastupdate = new;
2184 	rcnt = srq->srq_rcnt--;
2185 	ASSERT(rcnt > 0);
2186 	srq->srq_rlentime += delta * rcnt;
2187 	srq->srq_rtime += delta;
2188 	mutex_exit(&srq->srq_mutex);
2189 }
2190 
2191 /*
2192  * smb_srqueue_waitq_to_runq
2193  */
2194 void
2195 smb_srqueue_waitq_to_runq(smb_srqueue_t *srq)
2196 {
2197 	hrtime_t	new;
2198 	hrtime_t	delta;
2199 	uint32_t	wcnt;
2200 	uint32_t	rcnt;
2201 
2202 	mutex_enter(&srq->srq_mutex);
2203 	new = gethrtime_unscaled();
2204 	delta = new - srq->srq_wlastupdate;
2205 	srq->srq_wlastupdate = new;
2206 	wcnt = srq->srq_wcnt--;
2207 	ASSERT(wcnt > 0);
2208 	srq->srq_wlentime += delta * wcnt;
2209 	srq->srq_wtime += delta;
2210 	delta = new - srq->srq_rlastupdate;
2211 	srq->srq_rlastupdate = new;
2212 	rcnt = srq->srq_rcnt++;
2213 	if (rcnt != 0) {
2214 		srq->srq_rlentime += delta * rcnt;
2215 		srq->srq_rtime += delta;
2216 	}
2217 	mutex_exit(&srq->srq_mutex);
2218 }
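
/*
 * Illustrative sketch, not part of the build: how the three
 * transitions above map onto the life of one request.  The example
 * function and SMB_KUTIL_EXAMPLE guard are hypothetical.
 */
#ifdef SMB_KUTIL_EXAMPLE
static void
smb_srqueue_example(smb_srqueue_t *srq)
{
	smb_srqueue_waitq_enter(srq);	/* request arrives, waits */
	smb_srqueue_waitq_to_runq(srq);	/* a worker picks it up */
	smb_srqueue_runq_exit(srq);	/* request completes */
}
#endif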
2219 
2220 /*
2221  * smb_srqueue_update
2222  *
2223  * Takes a snapshot of the smb_srqueue_t structure passed in.
2224  */
2225 void
2226 smb_srqueue_update(smb_srqueue_t *srq, smb_kstat_utilization_t *kd)
2227 {
2228 	hrtime_t	delta;
2229 	hrtime_t	snaptime;
2230 
2231 	mutex_enter(&srq->srq_mutex);
2232 	snaptime = gethrtime_unscaled();
2233 	delta = snaptime - srq->srq_wlastupdate;
2234 	srq->srq_wlastupdate = snaptime;
2235 	if (srq->srq_wcnt != 0) {
2236 		srq->srq_wlentime += delta * srq->srq_wcnt;
2237 		srq->srq_wtime += delta;
2238 	}
2239 	delta = snaptime - srq->srq_rlastupdate;
2240 	srq->srq_rlastupdate = snaptime;
2241 	if (srq->srq_rcnt != 0) {
2242 		srq->srq_rlentime += delta * srq->srq_rcnt;
2243 		srq->srq_rtime += delta;
2244 	}
2245 	kd->ku_rlentime = srq->srq_rlentime;
2246 	kd->ku_rtime = srq->srq_rtime;
2247 	kd->ku_wlentime = srq->srq_wlentime;
2248 	kd->ku_wtime = srq->srq_wtime;
2249 	mutex_exit(&srq->srq_mutex);
2250 	scalehrtime(&kd->ku_rlentime);
2251 	scalehrtime(&kd->ku_rtime);
2252 	scalehrtime(&kd->ku_wlentime);
2253 	scalehrtime(&kd->ku_wtime);
2254 }
2255 
2256 void
2257 smb_threshold_init(smb_cmd_threshold_t *ct, char *cmd, int threshold,
2258     int timeout)
2259 {
2260 	bzero(ct, sizeof (smb_cmd_threshold_t));
2261 	mutex_init(&ct->ct_mutex, NULL, MUTEX_DEFAULT, NULL);
2262 	ct->ct_cmd = cmd;
2263 	ct->ct_threshold = threshold;
2264 	ct->ct_event = smb_event_create(timeout);
2265 	ct->ct_event_id = smb_event_txid(ct->ct_event);
2266 
2267 	if (smb_threshold_debug) {
2268 		cmn_err(CE_NOTE, "smb_threshold_init[%s]: threshold (%d), "
2269 		    "timeout (%d)", cmd, threshold, timeout);
2270 	}
2271 }
2272 
2273 /*
2274  * This function must be called prior to SMB_SERVER_STATE_STOPPING state
2275  * so that ct_event can be successfully removed from the event list.
2276  * It should not be called when the server mutex is held or when the
2277  * server is removed from the server list.
2278  */
2279 void
2280 smb_threshold_fini(smb_cmd_threshold_t *ct)
2281 {
2282 	smb_event_destroy(ct->ct_event);
2283 	mutex_destroy(&ct->ct_mutex);
2284 	bzero(ct, sizeof (smb_cmd_threshold_t));
2285 }
2286 
2287 /*
2288  * This threshold mechanism can be used to limit the number of simultaneous
2289  * requests, which serves to limit the stress that can be applied to the
2290  * service and also allows the service to respond to requests before the
2291  * client times out and reports that the server is not responding,
2292  *
2293  * If the number of requests exceeds the threshold, new requests will be
2294  * stalled until the number drops back to the threshold.  Stalled requests
2295  * will be notified as appropriate, in which case 0 will be returned.
2296  * If the timeout expires before the request is notified, a non-zero errno
2297  * value will be returned.
2298  *
2299  * To avoid a flood of messages, the message rate is throttled as well.
2300  */
2301 int
2302 smb_threshold_enter(smb_cmd_threshold_t *ct)
2303 {
2304 	int	rc;
2305 
2306 	mutex_enter(&ct->ct_mutex);
2307 	if (ct->ct_active_cnt >= ct->ct_threshold && ct->ct_event != NULL) {
2308 		atomic_inc_32(&ct->ct_blocked_cnt);
2309 
2310 		if (smb_threshold_debug) {
2311 			cmn_err(CE_NOTE, "smb_threshold_enter[%s]: blocked "
2312 			    "(blocked ops: %u, inflight ops: %u)",
2313 			    ct->ct_cmd, ct->ct_blocked_cnt, ct->ct_active_cnt);
2314 		}
2315 
2316 		mutex_exit(&ct->ct_mutex);
2317 
2318 		if ((rc = smb_event_wait(ct->ct_event)) != 0) {
2319 			if (rc == ECANCELED)
2320 				return (rc);
2321 
2322 			mutex_enter(&ct->ct_mutex);
2323 			if (ct->ct_active_cnt >= ct->ct_threshold) {
2324 
2325 				if ((ct->ct_error_cnt %
2326 				    SMB_THRESHOLD_REPORT_THROTTLE) == 0) {
2327 					cmn_err(CE_NOTE, "%s: server busy: "
2328 					    "threshold %d exceeded)",
2329 					    "threshold %d exceeded",
2330 				}
2331 
2332 				atomic_inc_32(&ct->ct_error_cnt);
2333 				mutex_exit(&ct->ct_mutex);
2334 				return (rc);
2335 			}
2336 
2337 			mutex_exit(&ct->ct_mutex);
2338 
2339 		}
2340 
2341 		mutex_enter(&ct->ct_mutex);
2342 		atomic_dec_32(&ct->ct_blocked_cnt);
2343 		if (smb_threshold_debug) {
2344 			cmn_err(CE_NOTE, "smb_threshold_enter[%s]: resumed "
2345 			    "(blocked ops: %u, inflight ops: %u)", ct->ct_cmd,
2346 			    ct->ct_blocked_cnt, ct->ct_active_cnt);
2347 		}
2348 	}
2349 
2350 	atomic_inc_32(&ct->ct_active_cnt);
2351 	mutex_exit(&ct->ct_mutex);
2352 	return (0);
2353 }
2354 
2355 void
2356 smb_threshold_exit(smb_cmd_threshold_t *ct, smb_server_t *sv)
2357 {
2358 	mutex_enter(&ct->ct_mutex);
2359 	atomic_dec_32(&ct->ct_active_cnt);
2360 	mutex_exit(&ct->ct_mutex);
2361 	smb_event_notify(sv, ct->ct_event_id);
2362 }
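
/*
 * Illustrative sketch, not part of the build: the enter/exit pairing
 * around a throttled command.  The example function and
 * SMB_KUTIL_EXAMPLE guard are hypothetical.
 */
#ifdef SMB_KUTIL_EXAMPLE
static int
smb_threshold_example(smb_cmd_threshold_t *ct, smb_server_t *sv)
{
	int	rc;

	if ((rc = smb_threshold_enter(ct)) != 0)
		return (rc);	/* timed out or canceled */
	/* ... execute the command ... */
	smb_threshold_exit(ct, sv);
	return (0);
}
#endif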
2363