xref: /titanic_41/usr/src/uts/common/fs/smbsrv/smb_kutil.c (revision 35dae2328064ca9e149cf5d3a7ba1688ed4629b6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 #include <sys/param.h>
26 #include <sys/types.h>
27 #include <sys/tzfile.h>
28 #include <sys/atomic.h>
29 #include <sys/kidmap.h>
30 #include <sys/time.h>
31 #include <sys/cpuvar.h>
32 #include <smbsrv/smb_kproto.h>
33 #include <smbsrv/smb_fsops.h>
34 #include <smbsrv/smbinfo.h>
35 #include <smbsrv/smb_xdr.h>
36 #include <smbsrv/smb_vops.h>
37 #include <smbsrv/smb_idmap.h>
38 
39 #include <sys/sid.h>
40 #include <sys/priv_names.h>
41 
/* Object cache backing the llist delayed-delete (dtor) queue. */
static kmem_cache_t	*smb_dtor_cache;
/* Guards against repeated smb_llist_init()/smb_llist_fini() calls. */
static boolean_t	smb_llist_initialized = B_FALSE;

static boolean_t smb_thread_continue_timedwait_locked(smb_thread_t *, int);

/* Leap-second count; presumably consumed by the time conversion code. */
time_t tzh_leapcnt = 0;

struct tm
*smb_gmtime_r(time_t *clock, struct tm *result);

time_t
smb_timegm(struct tm *tm);

/*
 * Private kernel definition of struct tm (the userland ctime(3C)
 * layout is not available here).
 */
struct	tm {
	int	tm_sec;
	int	tm_min;
	int	tm_hour;
	int	tm_mday;
	int	tm_mon;
	int	tm_year;
	int	tm_wday;
	int	tm_yday;
	int	tm_isdst;
};

/*
 * Days per month in a non-leap year; used by the time conversion
 * routines declared above (definitions not visible in this chunk).
 */
static int days_in_month[] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
70 
71 int
72 smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
73 {
74 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
75 		return (smb_wcequiv_strlen(str));
76 	return (strlen(str));
77 }
78 
79 int
80 smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
81 {
82 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
83 		return (smb_wcequiv_strlen(str) + 2);
84 	return (strlen(str) + 1);
85 }
86 
87 int
88 smb_ascii_or_unicode_null_len(struct smb_request *sr)
89 {
90 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
91 		return (2);
92 	return (1);
93 }
94 
95 /*
96  * Return B_TRUE if pattern contains wildcards
97  */
98 boolean_t
99 smb_contains_wildcards(const char *pattern)
100 {
101 	static const char *wildcards = "*?";
102 
103 	return (strpbrk(pattern, wildcards) != NULL);
104 }
105 
106 /*
107  * When converting wildcards a '.' in a name is treated as a base and
108  * extension separator even if the name is longer than 8.3.
109  *
110  * The '*' character matches an entire part of the name.  For example,
111  * "*.abc" matches any name with an extension of "abc".
112  *
113  * The '?' character matches a single character.
114  * If the base contains all ? (8 or more) then it is treated as *.
115  * If the extension contains all ? (3 or more) then it is treated as *.
116  *
117  * Clients convert ASCII wildcards to Unicode wildcards as follows:
118  *
119  *	? is converted to >
120  *	. is converted to " if it is followed by ? or *
121  *	* is converted to < if it is followed by .
122  *
123  * Note that clients convert "*." to '< and drop the '.' but "*.txt"
124  * is sent as "<.TXT", i.e.
125  *
126  * 	dir *.		->	dir <
127  * 	dir *.txt	->	dir <.TXT
128  *
129  * Since " and < are illegal in Windows file names, we always convert
130  * these Unicode wildcards without checking the following character.
131  */
void
smb_convert_wildcards(char *pattern)
{
	/* Patterns that are equivalent to "match everything". */
	static char *match_all[] = {
		"*.",
		"*.*"
	};
	char	*extension;
	char	*p;
	int	len;
	int	i;

	/*
	 * Special case "<" for "dir *.", and fast-track for "*".
	 */
	if ((*pattern == '<') || (*pattern == '*')) {
		if (*(pattern + 1) == '\0') {
			*pattern = '*';
			return;
		}
	}

	/* Map the Unicode wildcards back to their ASCII equivalents. */
	for (p = pattern; *p != '\0'; ++p) {
		switch (*p) {
		case '<':
			*p = '*';
			break;
		case '>':
			*p = '?';
			break;
		case '\"':
			*p = '.';
			break;
		default:
			break;
		}
	}

	/*
	 * Replace "????????.ext" with "*.ext".
	 */
	p = pattern;
	p += strspn(p, "?");
	if (*p == '.') {
		/* Temporarily terminate the base to measure its length. */
		*p = '\0';
		len = strlen(pattern);
		*p = '.';
		if (len >= SMB_NAME83_BASELEN) {
			*pattern = '*';
			/*
			 * NOTE(review): source (p) and destination
			 * (pattern + 1) overlap within the same buffer;
			 * strlcpy on overlapping regions is formally
			 * undefined -- consider memmove.  TODO confirm.
			 */
			(void) strlcpy(pattern + 1, p, MAXPATHLEN - 1);
		}
	}

	/*
	 * Replace "base.???" with 'base.*'.
	 */
	if ((extension = strrchr(pattern, '.')) != NULL) {
		p = ++extension;
		p += strspn(p, "?");
		if (*p == '\0') {
			len = strlen(extension);
			if (len >= SMB_NAME83_EXTLEN) {
				*extension = '\0';
				(void) strlcat(pattern, "*", MAXPATHLEN);
			}
		}
	}

	/*
	 * Replace anything that matches an entry in match_all with "*".
	 */
	for (i = 0; i < sizeof (match_all) / sizeof (match_all[0]); ++i) {
		if (strcmp(pattern, match_all[i]) == 0) {
			(void) strlcpy(pattern, "*", MAXPATHLEN);
			break;
		}
	}
}
210 
211 /*
212  * smb_sattr_check
213  *
214  * Check file attributes against a search attribute (sattr) mask.
215  *
216  * Normal files, which includes READONLY and ARCHIVE, always pass
217  * this check.  If the DIRECTORY, HIDDEN or SYSTEM special attributes
218  * are set then they must appear in the search mask.  The special
219  * attributes are inclusive, i.e. all special attributes that appear
220  * in sattr must also appear in the file attributes for the check to
221  * pass.
222  *
223  * The following examples show how this works:
224  *
225  *		fileA:	READONLY
226  *		fileB:	0 (no attributes = normal file)
227  *		fileC:	READONLY, ARCHIVE
228  *		fileD:	HIDDEN
229  *		fileE:	READONLY, HIDDEN, SYSTEM
230  *		dirA:	DIRECTORY
231  *
232  * search attribute: 0
233  *		Returns: fileA, fileB and fileC.
234  * search attribute: HIDDEN
235  *		Returns: fileA, fileB, fileC and fileD.
236  * search attribute: SYSTEM
237  *		Returns: fileA, fileB and fileC.
238  * search attribute: DIRECTORY
239  *		Returns: fileA, fileB, fileC and dirA.
240  * search attribute: HIDDEN and SYSTEM
241  *		Returns: fileA, fileB, fileC, fileD and fileE.
242  *
243  * Returns true if the file and sattr match; otherwise, returns false.
244  */
245 boolean_t
246 smb_sattr_check(uint16_t dosattr, uint16_t sattr)
247 {
248 	if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
249 	    !(sattr & FILE_ATTRIBUTE_DIRECTORY))
250 		return (B_FALSE);
251 
252 	if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
253 	    !(sattr & FILE_ATTRIBUTE_HIDDEN))
254 		return (B_FALSE);
255 
256 	if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
257 	    !(sattr & FILE_ATTRIBUTE_SYSTEM))
258 		return (B_FALSE);
259 
260 	return (B_TRUE);
261 }
262 
263 int
264 microtime(timestruc_t *tvp)
265 {
266 	tvp->tv_sec = gethrestime_sec();
267 	tvp->tv_nsec = 0;
268 	return (0);
269 }
270 
/*
 * Return the system uptime (lbolt ticks) converted to milliseconds.
 *
 * Fix: declare the parameter list as (void).  An empty parameter list
 * in a C definition is an old-style, unprototyped declaration and
 * disables argument checking at call sites.
 */
int32_t
clock_get_milli_uptime(void)
{
	return (TICK_TO_MSEC(ddi_get_lbolt()));
}
276 
/*
 * No-op callback: ignores all of its arguments and returns 0.
 */
int /*ARGSUSED*/
smb_noop(void *p, size_t size, int foo)
{
	return (0);
}
282 
/*
 * smb_idpool_increment
 *
 * This function increments the ID pool by doubling the current size. This
 * function assumes the caller entered the mutex of the pool.
 *
 * Returns 0 on success, -1 if the pool is already at its maximum size
 * or the new bitmap cannot be allocated.
 */
static int
smb_idpool_increment(
    smb_idpool_t	*pool)
{
	uint8_t		*new_pool;
	uint32_t	new_size;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	new_size = pool->id_size * 2;
	if (new_size <= SMB_IDPOOL_MAX_SIZE) {
		/* The pool is a bitmap: one bit per ID, hence size / 8. */
		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
		if (new_pool) {
			bzero(new_pool, new_size / 8);
			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
			kmem_free(pool->id_pool, pool->id_size / 8);
			pool->id_pool = new_pool;
			pool->id_free_counter += new_size - pool->id_size;
			pool->id_max_free_counter += new_size - pool->id_size;
			pool->id_size = new_size;
			pool->id_idx_msk = (new_size / 8) - 1;
			if (new_size >= SMB_IDPOOL_MAX_SIZE) {
				/* id -1 made unavailable */
				pool->id_pool[pool->id_idx_msk] = 0x80;
				pool->id_free_counter--;
				pool->id_max_free_counter--;
			}
			return (0);
		}
	}
	return (-1);
}
321 
/*
 * smb_idpool_constructor
 *
 * This function initializes the pool structure provided.  The pool is
 * a bitmap, one bit per ID, protected by a mutex.  ID 0 is reserved
 * here; ID -1 (0xFFFF) is reserved when the pool reaches its maximum
 * size (see smb_idpool_increment).  Always returns 0: the initial
 * allocation uses KM_SLEEP and cannot fail.
 */
int
smb_idpool_constructor(
    smb_idpool_t	*pool)
{

	ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);

	pool->id_size = SMB_IDPOOL_MIN_SIZE;
	pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
	pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	/* Start the allocation scan at id 1 (bit 1 of byte 0). */
	pool->id_bit = 0x02;
	pool->id_bit_idx = 1;
	pool->id_idx = 0;
	pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
	    KM_SLEEP);
	bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
	/* -1 id made unavailable */
	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
	pool->id_magic = SMB_IDPOOL_MAGIC;
	return (0);
}
350 
/*
 * smb_idpool_destructor
 *
 * This function tears down and frees the resources associated with the
 * pool provided.  All allocated IDs must have been returned first
 * (the free counter must be back at its maximum).
 */
void
smb_idpool_destructor(
    smb_idpool_t	*pool)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
	/* Invalidate the magic first to catch use-after-destroy. */
	pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
	mutex_destroy(&pool->id_mutex);
	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
}
367 
/*
 * smb_idpool_alloc
 *
 * This function allocates an ID from the pool provided.  On success the
 * ID is stored through 'id' and 0 is returned; -1 is returned if the
 * pool is exhausted and cannot be grown.
 */
int
smb_idpool_alloc(
    smb_idpool_t	*pool,
    uint16_t		*id)
{
	uint32_t	i;
	uint8_t		bit;
	uint8_t		bit_idx;
	uint8_t		byte;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	mutex_enter(&pool->id_mutex);
	/* Grow (double) the pool if no ID is currently free. */
	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
		mutex_exit(&pool->id_mutex);
		return (-1);
	}

	/*
	 * Scan the bitmap for a clear bit, resuming from where the
	 * previous allocation left off (id_idx / id_bit).
	 */
	i = pool->id_size;
	while (i) {
		bit = pool->id_bit;
		bit_idx = pool->id_bit_idx;
		byte = pool->id_pool[pool->id_idx];
		while (bit) {
			if (byte & bit) {
				/* ID in use; try the next bit. */
				bit = bit << 1;
				bit_idx++;
				continue;
			}
			/* Found a free ID: mark it allocated. */
			pool->id_pool[pool->id_idx] |= bit;
			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
			pool->id_free_counter--;
			pool->id_bit = bit;
			pool->id_bit_idx = bit_idx;
			mutex_exit(&pool->id_mutex);
			return (0);
		}
		/* This byte is full; wrap to the next byte of the bitmap. */
		pool->id_bit = 1;
		pool->id_bit_idx = 0;
		pool->id_idx++;
		pool->id_idx &= pool->id_idx_msk;
		--i;
	}
	/*
	 * This section of code shouldn't be reached. If there are IDs
	 * available and none could be found there's a problem.
	 */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
	return (-1);
}
424 
/*
 * smb_idpool_free
 *
 * This function frees the ID provided.  IDs 0 and 0xFFFF are reserved
 * and must never be freed; freeing an ID that is not allocated trips
 * an assertion.
 */
void
smb_idpool_free(
    smb_idpool_t	*pool,
    uint16_t		id)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(id != 0);
	ASSERT(id != 0xFFFF);

	mutex_enter(&pool->id_mutex);
	/* The ID is allocated iff its bit is set in the bitmap. */
	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
		pool->id_free_counter++;
		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
		mutex_exit(&pool->id_mutex);
		return;
	}
	/* Freeing a free ID. */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
}
451 
452 /*
453  * Initialize the llist delete queue object cache.
454  */
455 void
456 smb_llist_init(void)
457 {
458 	if (smb_llist_initialized)
459 		return;
460 
461 	smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
462 	    sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
463 
464 	smb_llist_initialized = B_TRUE;
465 }
466 
467 /*
468  * Destroy the llist delete queue object cache.
469  */
470 void
471 smb_llist_fini(void)
472 {
473 	if (!smb_llist_initialized)
474 		return;
475 
476 	kmem_cache_destroy(smb_dtor_cache);
477 	smb_llist_initialized = B_FALSE;
478 }
479 
480 /*
481  * smb_llist_constructor
482  *
483  * This function initializes a locked list.
484  */
485 void
486 smb_llist_constructor(
487     smb_llist_t	*ll,
488     size_t	size,
489     size_t	offset)
490 {
491 	rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
492 	mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
493 	list_create(&ll->ll_list, size, offset);
494 	list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
495 	    offsetof(smb_dtor_t, dt_lnd));
496 	ll->ll_count = 0;
497 	ll->ll_wrop = 0;
498 	ll->ll_deleteq_count = 0;
499 }
500 
/*
 * Flush the delete queue and destroy a locked list.  The list itself
 * must already be empty; only the delete queue may still hold work.
 */
void
smb_llist_destructor(
    smb_llist_t	*ll)
{
	smb_llist_flush(ll);

	ASSERT(ll->ll_count == 0);
	ASSERT(ll->ll_deleteq_count == 0);

	rw_destroy(&ll->ll_lock);
	list_destroy(&ll->ll_list);
	list_destroy(&ll->ll_deleteq);
	mutex_destroy(&ll->ll_mutex);
}
518 
519 /*
520  * Post an object to the delete queue.  The delete queue will be processed
521  * during list exit or list destruction.  Objects are often posted for
522  * deletion during list iteration (while the list is locked) but that is
523  * not required, and an object can be posted at any time.
524  */
525 void
526 smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
527 {
528 	smb_dtor_t	*dtor;
529 
530 	ASSERT((object != NULL) && (dtorproc != NULL));
531 
532 	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
533 	bzero(dtor, sizeof (smb_dtor_t));
534 	dtor->dt_magic = SMB_DTOR_MAGIC;
535 	dtor->dt_object = object;
536 	dtor->dt_proc = dtorproc;
537 
538 	mutex_enter(&ll->ll_mutex);
539 	list_insert_tail(&ll->ll_deleteq, dtor);
540 	++ll->ll_deleteq_count;
541 	mutex_exit(&ll->ll_mutex);
542 }
543 
/*
 * Exit the list lock and process the delete queue.  Objects posted
 * for deletion while the lock was held are destroyed here, after the
 * lock has been dropped.
 */
void
smb_llist_exit(smb_llist_t *ll)
{
	rw_exit(&ll->ll_lock);
	smb_llist_flush(ll);
}
553 
/*
 * Flush the list delete queue.  The mutex is dropped across the destructor
 * call in case this leads to additional objects being posted to the delete
 * queue.
 */
void
smb_llist_flush(smb_llist_t *ll)
{
	smb_dtor_t    *dtor;

	mutex_enter(&ll->ll_mutex);

	dtor = list_head(&ll->ll_deleteq);
	while (dtor != NULL) {
		SMB_DTOR_VALID(dtor);
		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
		/* Detach the entry before dropping the mutex. */
		list_remove(&ll->ll_deleteq, dtor);
		--ll->ll_deleteq_count;
		mutex_exit(&ll->ll_mutex);

		/* Run the destructor without holding the queue mutex. */
		dtor->dt_proc(dtor->dt_object);

		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
		kmem_cache_free(smb_dtor_cache, dtor);
		/* Re-acquire and pick up anything newly posted. */
		mutex_enter(&ll->ll_mutex);
		dtor = list_head(&ll->ll_deleteq);
	}

	mutex_exit(&ll->ll_mutex);
}
584 
585 /*
586  * smb_llist_upgrade
587  *
588  * This function tries to upgrade the lock of the locked list. It assumes the
589  * locked has already been entered in RW_READER mode. It first tries using the
590  * Solaris function rw_tryupgrade(). If that call fails the lock is released
591  * and reentered in RW_WRITER mode. In that last case a window is opened during
592  * which the contents of the list may have changed. The return code indicates
593  * whether or not the list was modified when the lock was exited.
594  */
595 int smb_llist_upgrade(
596     smb_llist_t *ll)
597 {
598 	uint64_t	wrop;
599 
600 	if (rw_tryupgrade(&ll->ll_lock) != 0) {
601 		return (0);
602 	}
603 	wrop = ll->ll_wrop;
604 	rw_exit(&ll->ll_lock);
605 	rw_enter(&ll->ll_lock, RW_WRITER);
606 	return (wrop != ll->ll_wrop);
607 }
608 
609 /*
610  * smb_llist_insert_head
611  *
612  * This function inserts the object passed a the beginning of the list. This
613  * function assumes the lock of the list has already been entered.
614  */
615 void
616 smb_llist_insert_head(
617     smb_llist_t	*ll,
618     void	*obj)
619 {
620 	list_insert_head(&ll->ll_list, obj);
621 	++ll->ll_wrop;
622 	++ll->ll_count;
623 }
624 
625 /*
626  * smb_llist_insert_tail
627  *
628  * This function appends to the object passed to the list. This function assumes
629  * the lock of the list has already been entered.
630  *
631  */
632 void
633 smb_llist_insert_tail(
634     smb_llist_t	*ll,
635     void	*obj)
636 {
637 	list_insert_tail(&ll->ll_list, obj);
638 	++ll->ll_wrop;
639 	++ll->ll_count;
640 }
641 
642 /*
643  * smb_llist_remove
644  *
645  * This function removes the object passed from the list. This function assumes
646  * the lock of the list has already been entered.
647  */
648 void
649 smb_llist_remove(
650     smb_llist_t	*ll,
651     void	*obj)
652 {
653 	list_remove(&ll->ll_list, obj);
654 	++ll->ll_wrop;
655 	--ll->ll_count;
656 }
657 
658 /*
659  * smb_llist_get_count
660  *
661  * This function returns the number of elements in the specified list.
662  */
663 uint32_t
664 smb_llist_get_count(
665     smb_llist_t *ll)
666 {
667 	return (ll->ll_count);
668 }
669 
670 /*
671  * smb_slist_constructor
672  *
673  * Synchronized list constructor.
674  */
675 void
676 smb_slist_constructor(
677     smb_slist_t	*sl,
678     size_t	size,
679     size_t	offset)
680 {
681 	mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
682 	cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
683 	list_create(&sl->sl_list, size, offset);
684 	sl->sl_count = 0;
685 	sl->sl_waiting = B_FALSE;
686 }
687 
688 /*
689  * smb_slist_destructor
690  *
691  * Synchronized list destructor.
692  */
693 void
694 smb_slist_destructor(
695     smb_slist_t	*sl)
696 {
697 	ASSERT(sl->sl_count == 0);
698 
699 	mutex_destroy(&sl->sl_mutex);
700 	cv_destroy(&sl->sl_cv);
701 	list_destroy(&sl->sl_list);
702 }
703 
704 /*
705  * smb_slist_insert_head
706  *
707  * This function inserts the object passed a the beginning of the list.
708  */
709 void
710 smb_slist_insert_head(
711     smb_slist_t	*sl,
712     void	*obj)
713 {
714 	mutex_enter(&sl->sl_mutex);
715 	list_insert_head(&sl->sl_list, obj);
716 	++sl->sl_count;
717 	mutex_exit(&sl->sl_mutex);
718 }
719 
720 /*
721  * smb_slist_insert_tail
722  *
723  * This function appends the object passed to the list.
724  */
725 void
726 smb_slist_insert_tail(
727     smb_slist_t	*sl,
728     void	*obj)
729 {
730 	mutex_enter(&sl->sl_mutex);
731 	list_insert_tail(&sl->sl_list, obj);
732 	++sl->sl_count;
733 	mutex_exit(&sl->sl_mutex);
734 }
735 
/*
 * smb_slist_remove
 *
 * This function removes the object passed by the caller from the list.
 * If the list becomes empty and a thread is blocked in
 * smb_slist_wait_for_empty(), it is woken up.
 */
void
smb_slist_remove(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_remove(&sl->sl_list, obj);
	if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}
754 
/*
 * smb_slist_move_tail
 *
 * This function transfers all the contents of the synchronized list to the
 * list_t provided. It returns the number of objects transferred.  Since
 * the synchronized list becomes empty, any thread blocked in
 * smb_slist_wait_for_empty() is woken up.
 */
uint32_t
smb_slist_move_tail(
    list_t	*lst,
    smb_slist_t	*sl)
{
	uint32_t	rv;

	mutex_enter(&sl->sl_mutex);
	rv = sl->sl_count;
	if (sl->sl_count) {
		list_move_tail(lst, &sl->sl_list);
		sl->sl_count = 0;
		if (sl->sl_waiting) {
			sl->sl_waiting = B_FALSE;
			cv_broadcast(&sl->sl_cv);
		}
	}
	mutex_exit(&sl->sl_mutex);
	return (rv);
}
781 
/*
 * smb_slist_obj_move
 *
 * This function moves an object from one list to the end of the other list. It
 * assumes the mutex of each list has been entered.  If the source list
 * becomes empty, any thread blocked in smb_slist_wait_for_empty() on it
 * is woken up.
 */
void
smb_slist_obj_move(
    smb_slist_t	*dst,
    smb_slist_t	*src,
    void	*obj)
{
	/* Both lists must link objects through the same node offset. */
	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);

	list_remove(&src->sl_list, obj);
	list_insert_tail(&dst->sl_list, obj);
	dst->sl_count++;
	src->sl_count--;
	if ((src->sl_count == 0) && (src->sl_waiting)) {
		src->sl_waiting = B_FALSE;
		cv_broadcast(&src->sl_cv);
	}
}
806 
/*
 * smb_slist_wait_for_empty
 *
 * This function waits for a list to be emptied.  Setting sl_waiting
 * tells the remove/move functions to broadcast sl_cv when the count
 * drops to zero.
 */
void
smb_slist_wait_for_empty(
    smb_slist_t	*sl)
{
	mutex_enter(&sl->sl_mutex);
	while (sl->sl_count) {
		sl->sl_waiting = B_TRUE;
		cv_wait(&sl->sl_cv, &sl->sl_mutex);
	}
	mutex_exit(&sl->sl_mutex);
}
823 
/*
 * smb_slist_exit
 *
 * This function exits the mutex of the list and signals the condition
 * variable if the list is empty.
 */
void
smb_slist_exit(smb_slist_t *sl)
{
	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}
839 
/*
 * smb_thread_entry_point
 *
 * Common entry point for all the threads created through smb_thread_start.
 * The state of the thread is set to "running" at the beginning and moved to
 * "exiting" just before calling thread_exit(). The condition variable is
 *  also signaled.
 */
static void
smb_thread_entry_point(
    smb_thread_t	*thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
	mutex_enter(&thread->sth_mtx);
	ASSERT(thread->sth_state == SMB_THREAD_STATE_STARTING);
	/* Record our identity so smb_thread_stop() can thread_join(). */
	thread->sth_th = curthread;
	thread->sth_did = thread->sth_th->t_did;

	/* Skip the entry point entirely if a stop raced our startup. */
	if (!thread->sth_kill) {
		thread->sth_state = SMB_THREAD_STATE_RUNNING;
		cv_signal(&thread->sth_cv);
		mutex_exit(&thread->sth_mtx);
		/* Run the caller-supplied entry point. */
		thread->sth_ep(thread, thread->sth_ep_arg);
		mutex_enter(&thread->sth_mtx);
	}
	thread->sth_th = NULL;
	thread->sth_state = SMB_THREAD_STATE_EXITING;
	cv_broadcast(&thread->sth_cv);
	mutex_exit(&thread->sth_mtx);
	thread_exit();
}
871 
/*
 * smb_thread_init
 *
 * Initialize an smb_thread_t: record its name, entry point and awaken
 * callback (each with its argument) and set up the state mutex and
 * condition variable.  The thread is left in the EXITED state; call
 * smb_thread_start() to actually create it.
 */
void
smb_thread_init(
    smb_thread_t	*thread,
    char		*name,
    smb_thread_ep_t	ep,
    void		*ep_arg,
    smb_thread_aw_t	aw,
    void		*aw_arg)
{
	ASSERT(thread->sth_magic != SMB_THREAD_MAGIC);

	bzero(thread, sizeof (*thread));

	(void) strlcpy(thread->sth_name, name, sizeof (thread->sth_name));
	thread->sth_ep = ep;
	thread->sth_ep_arg = ep_arg;
	thread->sth_aw = aw;
	thread->sth_aw_arg = aw_arg;
	thread->sth_state = SMB_THREAD_STATE_EXITED;
	mutex_init(&thread->sth_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&thread->sth_cv, NULL, CV_DEFAULT, NULL);
	/* Set the magic last, once the structure is fully initialized. */
	thread->sth_magic = SMB_THREAD_MAGIC;
}
898 
/*
 * smb_thread_destroy
 *
 * Tear down an smb_thread_t initialized by smb_thread_init().  The
 * thread must have fully exited (state EXITED).
 */
void
smb_thread_destroy(
    smb_thread_t	*thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
	ASSERT(thread->sth_state == SMB_THREAD_STATE_EXITED);
	/* Clear the magic first to catch use-after-destroy. */
	thread->sth_magic = 0;
	mutex_destroy(&thread->sth_mtx);
	cv_destroy(&thread->sth_cv);
}
912 
/*
 * smb_thread_start
 *
 * This function starts a thread with the parameters provided. It waits until
 * the state of the thread has been moved to running.  Returns 0 on
 * success, -1 if the thread was not in the EXITED state or failed to
 * reach RUNNING (e.g. it was killed while still starting).
 */
/*ARGSUSED*/
int
smb_thread_start(
    smb_thread_t	*thread)
{
	int		rc = 0;
	kthread_t	*tmpthread;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	switch (thread->sth_state) {
	case SMB_THREAD_STATE_EXITED:
		thread->sth_state = SMB_THREAD_STATE_STARTING;
		mutex_exit(&thread->sth_mtx);
		tmpthread = thread_create(NULL, 0, smb_thread_entry_point,
		    thread, 0, &p0, TS_RUN, minclsyspri);
		ASSERT(tmpthread != NULL);
		mutex_enter(&thread->sth_mtx);
		/* Wait for smb_thread_entry_point() to leave STARTING. */
		while (thread->sth_state == SMB_THREAD_STATE_STARTING)
			cv_wait(&thread->sth_cv, &thread->sth_mtx);
		if (thread->sth_state != SMB_THREAD_STATE_RUNNING)
			rc = -1;
		break;
	default:
		/* Starting a thread that is not fully stopped is a bug. */
		ASSERT(0);
		rc = -1;
		break;
	}
	mutex_exit(&thread->sth_mtx);
	return (rc);
}
951 
/*
 * smb_thread_stop
 *
 * This function signals a thread to kill itself and waits until the "exiting"
 * state has been reached.  Safe to call concurrently: a second caller
 * that finds the kill flag already set simply waits for EXITED.
 */
void
smb_thread_stop(
    smb_thread_t	*thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	switch (thread->sth_state) {
	case SMB_THREAD_STATE_RUNNING:
	case SMB_THREAD_STATE_STARTING:
		if (!thread->sth_kill) {
			/*
			 * First stopper: raise the kill flag, wake the
			 * thread (awaken callback + cv), wait for it to
			 * reach EXITING, then reap it with thread_join().
			 */
			thread->sth_kill = B_TRUE;
			if (thread->sth_aw)
				thread->sth_aw(thread, thread->sth_aw_arg);
			cv_broadcast(&thread->sth_cv);
			while (thread->sth_state != SMB_THREAD_STATE_EXITING)
				cv_wait(&thread->sth_cv, &thread->sth_mtx);
			mutex_exit(&thread->sth_mtx);
			thread_join(thread->sth_did);
			mutex_enter(&thread->sth_mtx);
			thread->sth_state = SMB_THREAD_STATE_EXITED;
			thread->sth_did = 0;
			thread->sth_kill = B_FALSE;
			cv_broadcast(&thread->sth_cv);
			break;
		}
		/*FALLTHRU*/

	case SMB_THREAD_STATE_EXITING:
		if (thread->sth_kill) {
			/* Another stopper is reaping; wait for EXITED. */
			while (thread->sth_state != SMB_THREAD_STATE_EXITED)
				cv_wait(&thread->sth_cv, &thread->sth_mtx);
		} else {
			/* Thread exited on its own; just mark it EXITED. */
			thread->sth_state = SMB_THREAD_STATE_EXITED;
			thread->sth_did = 0;
		}
		break;

	case SMB_THREAD_STATE_EXITED:
		break;

	default:
		ASSERT(0);
		break;
	}
	mutex_exit(&thread->sth_mtx);
}
1005 
1006 /*
1007  * smb_thread_signal
1008  *
1009  * This function signals a thread.
1010  */
1011 void
1012 smb_thread_signal(
1013     smb_thread_t	*thread)
1014 {
1015 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1016 
1017 	mutex_enter(&thread->sth_mtx);
1018 	switch (thread->sth_state) {
1019 	case SMB_THREAD_STATE_RUNNING:
1020 		if (thread->sth_aw)
1021 			thread->sth_aw(thread, thread->sth_aw_arg);
1022 		cv_signal(&thread->sth_cv);
1023 		break;
1024 
1025 	default:
1026 		break;
1027 	}
1028 	mutex_exit(&thread->sth_mtx);
1029 }
1030 
1031 boolean_t
1032 smb_thread_continue(smb_thread_t *thread)
1033 {
1034 	boolean_t result;
1035 
1036 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1037 
1038 	mutex_enter(&thread->sth_mtx);
1039 	result = smb_thread_continue_timedwait_locked(thread, 0);
1040 	mutex_exit(&thread->sth_mtx);
1041 
1042 	return (result);
1043 }
1044 
1045 boolean_t
1046 smb_thread_continue_nowait(smb_thread_t *thread)
1047 {
1048 	boolean_t result;
1049 
1050 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1051 
1052 	mutex_enter(&thread->sth_mtx);
1053 	/*
1054 	 * Setting ticks=-1 requests a non-blocking check.  We will
1055 	 * still block if the thread is in "suspend" state.
1056 	 */
1057 	result = smb_thread_continue_timedwait_locked(thread, -1);
1058 	mutex_exit(&thread->sth_mtx);
1059 
1060 	return (result);
1061 }
1062 
1063 boolean_t
1064 smb_thread_continue_timedwait(smb_thread_t *thread, int seconds)
1065 {
1066 	boolean_t result;
1067 
1068 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1069 
1070 	mutex_enter(&thread->sth_mtx);
1071 	result = smb_thread_continue_timedwait_locked(thread,
1072 	    SEC_TO_TICK(seconds));
1073 	mutex_exit(&thread->sth_mtx);
1074 
1075 	return (result);
1076 }
1077 
1078 /*
1079  * smb_thread_continue_timedwait_locked
1080  *
1081  * Internal only.  Ticks==-1 means don't block, Ticks == 0 means wait
1082  * indefinitely
1083  */
1084 static boolean_t
1085 smb_thread_continue_timedwait_locked(smb_thread_t *thread, int ticks)
1086 {
1087 	boolean_t	result;
1088 
1089 	/* -1 means don't block */
1090 	if (ticks != -1 && !thread->sth_kill) {
1091 		if (ticks == 0) {
1092 			cv_wait(&thread->sth_cv, &thread->sth_mtx);
1093 		} else {
1094 			(void) cv_reltimedwait(&thread->sth_cv,
1095 			    &thread->sth_mtx, (clock_t)ticks, TR_CLOCK_TICK);
1096 		}
1097 	}
1098 	result = (thread->sth_kill == 0);
1099 
1100 	return (result);
1101 }
1102 
1103 void
1104 smb_thread_set_awaken(smb_thread_t *thread, smb_thread_aw_t new_aw_fn,
1105     void *new_aw_arg)
1106 {
1107 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1108 
1109 	mutex_enter(&thread->sth_mtx);
1110 	thread->sth_aw = new_aw_fn;
1111 	thread->sth_aw_arg = new_aw_arg;
1112 	mutex_exit(&thread->sth_mtx);
1113 }
1114 
1115 /*
1116  * smb_rwx_init
1117  */
1118 void
1119 smb_rwx_init(
1120     smb_rwx_t	*rwx)
1121 {
1122 	bzero(rwx, sizeof (smb_rwx_t));
1123 	cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
1124 	mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
1125 	rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
1126 }
1127 
1128 /*
1129  * smb_rwx_destroy
1130  */
1131 void
1132 smb_rwx_destroy(
1133     smb_rwx_t	*rwx)
1134 {
1135 	mutex_destroy(&rwx->rwx_mutex);
1136 	cv_destroy(&rwx->rwx_cv);
1137 	rw_destroy(&rwx->rwx_lock);
1138 }
1139 
/*
 * smb_rwx_rwexit
 *
 * Release the rwx lock.  If we hold it as writer, first wake any
 * threads blocked in smb_rwx_rwwait() -- a writer releasing the lock
 * is one of the events they wait for.
 */
void
smb_rwx_rwexit(
    smb_rwx_t	*rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mutex_enter(&rwx->rwx_mutex);
		if (rwx->rwx_waiting) {
			rwx->rwx_waiting = B_FALSE;
			cv_broadcast(&rwx->rwx_cv);
		}
		mutex_exit(&rwx->rwx_mutex);
	}
	rw_exit(&rwx->rwx_lock);
}
1158 
1159 /*
1160  * smb_rwx_rwupgrade
1161  */
1162 krw_t
1163 smb_rwx_rwupgrade(
1164     smb_rwx_t	*rwx)
1165 {
1166 	if (rw_write_held(&rwx->rwx_lock)) {
1167 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1168 		return (RW_WRITER);
1169 	}
1170 	if (!rw_tryupgrade(&rwx->rwx_lock)) {
1171 		rw_exit(&rwx->rwx_lock);
1172 		rw_enter(&rwx->rwx_lock, RW_WRITER);
1173 	}
1174 	return (RW_READER);
1175 }
1176 
/*
 * smb_rwx_rwdowngrade
 *
 * Restore the lock to the mode returned by smb_rwx_rwupgrade().  When
 * downgrading back to reader, first wake any threads blocked in
 * smb_rwx_rwwait(), since the write hold is being released.
 */
void
smb_rwx_rwdowngrade(
    smb_rwx_t	*rwx,
    krw_t	mode)
{
	ASSERT(rw_write_held(&rwx->rwx_lock));
	ASSERT(rw_owner(&rwx->rwx_lock) == curthread);

	if (mode == RW_WRITER) {
		/* Entry mode was writer; keep the write hold. */
		return;
	}
	ASSERT(mode == RW_READER);
	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		rwx->rwx_waiting = B_FALSE;
		cv_broadcast(&rwx->rwx_cv);
	}
	mutex_exit(&rwx->rwx_mutex);
	rw_downgrade(&rwx->rwx_lock);
}
1200 
1201 /*
1202  * smb_rwx_wait
1203  *
1204  * This function assumes the smb_rwx lock was enter in RW_READER or RW_WRITER
1205  * mode. It will:
1206  *
1207  *	1) release the lock and save its current mode.
1208  *	2) wait until the condition variable is signaled. This can happen for
1209  *	   2 reasons: When a writer releases the lock or when the time out (if
1210  *	   provided) expires.
1211  *	3) re-acquire the lock in the mode saved in (1).
1212  */
1213 int
1214 smb_rwx_rwwait(
1215     smb_rwx_t	*rwx,
1216     clock_t	timeout)
1217 {
1218 	int	rc;
1219 	krw_t	mode;
1220 
1221 	mutex_enter(&rwx->rwx_mutex);
1222 	rwx->rwx_waiting = B_TRUE;
1223 	mutex_exit(&rwx->rwx_mutex);
1224 
1225 	if (rw_write_held(&rwx->rwx_lock)) {
1226 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1227 		mode = RW_WRITER;
1228 	} else {
1229 		ASSERT(rw_read_held(&rwx->rwx_lock));
1230 		mode = RW_READER;
1231 	}
1232 	rw_exit(&rwx->rwx_lock);
1233 
1234 	mutex_enter(&rwx->rwx_mutex);
1235 	if (rwx->rwx_waiting) {
1236 		if (timeout == -1) {
1237 			rc = 1;
1238 			cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
1239 		} else {
1240 			rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
1241 			    timeout, TR_CLOCK_TICK);
1242 		}
1243 	}
1244 	mutex_exit(&rwx->rwx_mutex);
1245 
1246 	rw_enter(&rwx->rwx_lock, mode);
1247 	return (rc);
1248 }
1249 
1250 /*
1251  * SMB ID mapping
1252  *
1253  * Solaris ID mapping service (aka Winchester) works with domain SIDs
1254  * and RIDs where domain SIDs are in string format. CIFS service works
1255  * with binary SIDs understandable by CIFS clients. A layer of SMB ID
 * mapping functions are implemented to hide the SID conversion details
1257  * and also hide the handling of array of batch mapping requests.
1258  *
1259  * IMPORTANT NOTE The Winchester API requires a zone. Because CIFS server
1260  * currently only runs in the global zone the global zone is specified.
1261  * This needs to be fixed when the CIFS server supports zones.
1262  */
1263 
1264 static int smb_idmap_batch_binsid(smb_idmap_batch_t *sib);
1265 
1266 /*
1267  * smb_idmap_getid
1268  *
1269  * Maps the given Windows SID to a Solaris ID using the
1270  * simple mapping API.
1271  */
1272 idmap_stat
1273 smb_idmap_getid(smb_sid_t *sid, uid_t *id, int *idtype)
1274 {
1275 	smb_idmap_t sim;
1276 	char sidstr[SMB_SID_STRSZ];
1277 
1278 	smb_sid_tostr(sid, sidstr);
1279 	if (smb_sid_splitstr(sidstr, &sim.sim_rid) != 0)
1280 		return (IDMAP_ERR_SID);
1281 	sim.sim_domsid = sidstr;
1282 	sim.sim_id = id;
1283 
1284 	switch (*idtype) {
1285 	case SMB_IDMAP_USER:
1286 		sim.sim_stat = kidmap_getuidbysid(global_zone, sim.sim_domsid,
1287 		    sim.sim_rid, sim.sim_id);
1288 		break;
1289 
1290 	case SMB_IDMAP_GROUP:
1291 		sim.sim_stat = kidmap_getgidbysid(global_zone, sim.sim_domsid,
1292 		    sim.sim_rid, sim.sim_id);
1293 		break;
1294 
1295 	case SMB_IDMAP_UNKNOWN:
1296 		sim.sim_stat = kidmap_getpidbysid(global_zone, sim.sim_domsid,
1297 		    sim.sim_rid, sim.sim_id, &sim.sim_idtype);
1298 		break;
1299 
1300 	default:
1301 		ASSERT(0);
1302 		return (IDMAP_ERR_ARG);
1303 	}
1304 
1305 	*idtype = sim.sim_idtype;
1306 
1307 	return (sim.sim_stat);
1308 }
1309 
1310 /*
1311  * smb_idmap_getsid
1312  *
1313  * Maps the given Solaris ID to a Windows SID using the
1314  * simple mapping API.
1315  */
1316 idmap_stat
1317 smb_idmap_getsid(uid_t id, int idtype, smb_sid_t **sid)
1318 {
1319 	smb_idmap_t sim;
1320 
1321 	switch (idtype) {
1322 	case SMB_IDMAP_USER:
1323 		sim.sim_stat = kidmap_getsidbyuid(global_zone, id,
1324 		    (const char **)&sim.sim_domsid, &sim.sim_rid);
1325 		break;
1326 
1327 	case SMB_IDMAP_GROUP:
1328 		sim.sim_stat = kidmap_getsidbygid(global_zone, id,
1329 		    (const char **)&sim.sim_domsid, &sim.sim_rid);
1330 		break;
1331 
1332 	case SMB_IDMAP_EVERYONE:
1333 		/* Everyone S-1-1-0 */
1334 		sim.sim_domsid = "S-1-1";
1335 		sim.sim_rid = 0;
1336 		sim.sim_stat = IDMAP_SUCCESS;
1337 		break;
1338 
1339 	default:
1340 		ASSERT(0);
1341 		return (IDMAP_ERR_ARG);
1342 	}
1343 
1344 	if (sim.sim_stat != IDMAP_SUCCESS)
1345 		return (sim.sim_stat);
1346 
1347 	if (sim.sim_domsid == NULL)
1348 		return (IDMAP_ERR_NOMAPPING);
1349 
1350 	sim.sim_sid = smb_sid_fromstr(sim.sim_domsid);
1351 	if (sim.sim_sid == NULL)
1352 		return (IDMAP_ERR_INTERNAL);
1353 
1354 	*sid = smb_sid_splice(sim.sim_sid, sim.sim_rid);
1355 	smb_sid_free(sim.sim_sid);
1356 	if (*sid == NULL)
1357 		sim.sim_stat = IDMAP_ERR_INTERNAL;
1358 
1359 	return (sim.sim_stat);
1360 }
1361 
1362 /*
1363  * smb_idmap_batch_create
1364  *
1365  * Creates and initializes the context for batch ID mapping.
1366  */
1367 idmap_stat
1368 smb_idmap_batch_create(smb_idmap_batch_t *sib, uint16_t nmap, int flags)
1369 {
1370 	ASSERT(sib);
1371 
1372 	bzero(sib, sizeof (smb_idmap_batch_t));
1373 
1374 	sib->sib_idmaph = kidmap_get_create(global_zone);
1375 
1376 	sib->sib_flags = flags;
1377 	sib->sib_nmap = nmap;
1378 	sib->sib_size = nmap * sizeof (smb_idmap_t);
1379 	sib->sib_maps = kmem_zalloc(sib->sib_size, KM_SLEEP);
1380 
1381 	return (IDMAP_SUCCESS);
1382 }
1383 
1384 /*
1385  * smb_idmap_batch_destroy
1386  *
1387  * Frees the batch ID mapping context.
1388  * If ID mapping is Solaris -> Windows it frees memories
1389  * allocated for binary SIDs.
1390  */
1391 void
1392 smb_idmap_batch_destroy(smb_idmap_batch_t *sib)
1393 {
1394 	char *domsid;
1395 	int i;
1396 
1397 	ASSERT(sib);
1398 	ASSERT(sib->sib_maps);
1399 
1400 	if (sib->sib_idmaph)
1401 		kidmap_get_destroy(sib->sib_idmaph);
1402 
1403 	if (sib->sib_flags & SMB_IDMAP_ID2SID) {
1404 		/*
1405 		 * SIDs are allocated only when mapping
1406 		 * UID/GID to SIDs
1407 		 */
1408 		for (i = 0; i < sib->sib_nmap; i++)
1409 			smb_sid_free(sib->sib_maps[i].sim_sid);
1410 	} else if (sib->sib_flags & SMB_IDMAP_SID2ID) {
1411 		/*
1412 		 * SID prefixes are allocated only when mapping
1413 		 * SIDs to UID/GID
1414 		 */
1415 		for (i = 0; i < sib->sib_nmap; i++) {
1416 			domsid = sib->sib_maps[i].sim_domsid;
1417 			if (domsid)
1418 				smb_mem_free(domsid);
1419 		}
1420 	}
1421 
1422 	if (sib->sib_size && sib->sib_maps)
1423 		kmem_free(sib->sib_maps, sib->sib_size);
1424 }
1425 
1426 /*
1427  * smb_idmap_batch_getid
1428  *
1429  * Queue a request to map the given SID to a UID or GID.
1430  *
1431  * sim->sim_id should point to variable that's supposed to
1432  * hold the returned UID/GID. This needs to be setup by caller
1433  * of this function.
1434  *
1435  * If requested ID type is known, it's passed as 'idtype',
1436  * if it's unknown it'll be returned in sim->sim_idtype.
1437  */
1438 idmap_stat
1439 smb_idmap_batch_getid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
1440     smb_sid_t *sid, int idtype)
1441 {
1442 	char strsid[SMB_SID_STRSZ];
1443 	idmap_stat idm_stat;
1444 
1445 	ASSERT(idmaph);
1446 	ASSERT(sim);
1447 	ASSERT(sid);
1448 
1449 	smb_sid_tostr(sid, strsid);
1450 	if (smb_sid_splitstr(strsid, &sim->sim_rid) != 0)
1451 		return (IDMAP_ERR_SID);
1452 	sim->sim_domsid = smb_mem_strdup(strsid);
1453 
1454 	switch (idtype) {
1455 	case SMB_IDMAP_USER:
1456 		idm_stat = kidmap_batch_getuidbysid(idmaph, sim->sim_domsid,
1457 		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
1458 		break;
1459 
1460 	case SMB_IDMAP_GROUP:
1461 		idm_stat = kidmap_batch_getgidbysid(idmaph, sim->sim_domsid,
1462 		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
1463 		break;
1464 
1465 	case SMB_IDMAP_UNKNOWN:
1466 		idm_stat = kidmap_batch_getpidbysid(idmaph, sim->sim_domsid,
1467 		    sim->sim_rid, sim->sim_id, &sim->sim_idtype,
1468 		    &sim->sim_stat);
1469 		break;
1470 
1471 	default:
1472 		ASSERT(0);
1473 		return (IDMAP_ERR_ARG);
1474 	}
1475 
1476 	return (idm_stat);
1477 }
1478 
1479 /*
1480  * smb_idmap_batch_getsid
1481  *
1482  * Queue a request to map the given UID/GID to a SID.
1483  *
1484  * sim->sim_domsid and sim->sim_rid will contain the mapping
1485  * result upon successful process of the batched request.
1486  */
1487 idmap_stat
1488 smb_idmap_batch_getsid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
1489     uid_t id, int idtype)
1490 {
1491 	idmap_stat idm_stat;
1492 
1493 	switch (idtype) {
1494 	case SMB_IDMAP_USER:
1495 		idm_stat = kidmap_batch_getsidbyuid(idmaph, id,
1496 		    (const char **)&sim->sim_domsid, &sim->sim_rid,
1497 		    &sim->sim_stat);
1498 		break;
1499 
1500 	case SMB_IDMAP_GROUP:
1501 		idm_stat = kidmap_batch_getsidbygid(idmaph, id,
1502 		    (const char **)&sim->sim_domsid, &sim->sim_rid,
1503 		    &sim->sim_stat);
1504 		break;
1505 
1506 	case SMB_IDMAP_OWNERAT:
1507 		/* Current Owner S-1-5-32-766 */
1508 		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
1509 		sim->sim_rid = SECURITY_CURRENT_OWNER_RID;
1510 		sim->sim_stat = IDMAP_SUCCESS;
1511 		idm_stat = IDMAP_SUCCESS;
1512 		break;
1513 
1514 	case SMB_IDMAP_GROUPAT:
1515 		/* Current Group S-1-5-32-767 */
1516 		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
1517 		sim->sim_rid = SECURITY_CURRENT_GROUP_RID;
1518 		sim->sim_stat = IDMAP_SUCCESS;
1519 		idm_stat = IDMAP_SUCCESS;
1520 		break;
1521 
1522 	case SMB_IDMAP_EVERYONE:
1523 		/* Everyone S-1-1-0 */
1524 		sim->sim_domsid = NT_WORLD_AUTH_SIDSTR;
1525 		sim->sim_rid = 0;
1526 		sim->sim_stat = IDMAP_SUCCESS;
1527 		idm_stat = IDMAP_SUCCESS;
1528 		break;
1529 
1530 	default:
1531 		ASSERT(0);
1532 		return (IDMAP_ERR_ARG);
1533 	}
1534 
1535 	return (idm_stat);
1536 }
1537 
1538 /*
1539  * smb_idmap_batch_binsid
1540  *
1541  * Convert sidrids to binary sids
1542  *
1543  * Returns 0 if successful and non-zero upon failure.
1544  */
1545 static int
1546 smb_idmap_batch_binsid(smb_idmap_batch_t *sib)
1547 {
1548 	smb_sid_t *sid;
1549 	smb_idmap_t *sim;
1550 	int i;
1551 
1552 	if (sib->sib_flags & SMB_IDMAP_SID2ID)
1553 		/* This operation is not required */
1554 		return (0);
1555 
1556 	sim = sib->sib_maps;
1557 	for (i = 0; i < sib->sib_nmap; sim++, i++) {
1558 		ASSERT(sim->sim_domsid);
1559 		if (sim->sim_domsid == NULL)
1560 			return (1);
1561 
1562 		if ((sid = smb_sid_fromstr(sim->sim_domsid)) == NULL)
1563 			return (1);
1564 
1565 		sim->sim_sid = smb_sid_splice(sid, sim->sim_rid);
1566 		smb_sid_free(sid);
1567 	}
1568 
1569 	return (0);
1570 }
1571 
1572 /*
1573  * smb_idmap_batch_getmappings
1574  *
1575  * trigger ID mapping service to get the mappings for queued
1576  * requests.
1577  *
1578  * Checks the result of all the queued requests.
1579  * If this is a Solaris -> Windows mapping it generates
1580  * binary SIDs from returned (domsid, rid) pairs.
1581  */
1582 idmap_stat
1583 smb_idmap_batch_getmappings(smb_idmap_batch_t *sib)
1584 {
1585 	idmap_stat idm_stat = IDMAP_SUCCESS;
1586 	int i;
1587 
1588 	idm_stat = kidmap_get_mappings(sib->sib_idmaph);
1589 	if (idm_stat != IDMAP_SUCCESS)
1590 		return (idm_stat);
1591 
1592 	/*
1593 	 * Check the status for all the queued requests
1594 	 */
1595 	for (i = 0; i < sib->sib_nmap; i++) {
1596 		if (sib->sib_maps[i].sim_stat != IDMAP_SUCCESS)
1597 			return (sib->sib_maps[i].sim_stat);
1598 	}
1599 
1600 	if (smb_idmap_batch_binsid(sib) != 0)
1601 		idm_stat = IDMAP_ERR_OTHER;
1602 
1603 	return (idm_stat);
1604 }
1605 
1606 uint64_t
1607 smb_time_unix_to_nt(timestruc_t *unix_time)
1608 {
1609 	uint64_t nt_time;
1610 
1611 	if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
1612 		return (0);
1613 
1614 	nt_time = unix_time->tv_sec;
1615 	nt_time *= 10000000;  /* seconds to 100ns */
1616 	nt_time += unix_time->tv_nsec / 100;
1617 	return (nt_time + NT_TIME_BIAS);
1618 }
1619 
/*
 * Convert an NT time value (100ns units since 1601) to a UNIX
 * timestruc_t.  0 and (uint64_t)-1 are "no time" sentinels and map
 * to a zeroed timestruc_t.
 */
void
smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
{
	/*
	 * NOTE(review): seconds is 32-bit, so times past 2106 would be
	 * truncated here — presumably acceptable for SMB; confirm.
	 */
	uint32_t seconds;

	ASSERT(unix_time);

	if ((nt_time == 0) || (nt_time == -1)) {
		unix_time->tv_sec = 0;
		unix_time->tv_nsec = 0;
		return;
	}

	/* Remove the 1601->1970 bias, then split into sec/nsec. */
	nt_time -= NT_TIME_BIAS;
	seconds = nt_time / 10000000;
	unix_time->tv_sec = seconds;
	unix_time->tv_nsec = (nt_time  % 10000000) * 100;
}
1638 
1639 /*
1640  * smb_time_gmt_to_local, smb_time_local_to_gmt
1641  *
1642  * Apply the gmt offset to convert between local time and gmt
1643  */
1644 int32_t
1645 smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt)
1646 {
1647 	if ((gmt == 0) || (gmt == -1))
1648 		return (0);
1649 
1650 	return (gmt - sr->sr_gmtoff);
1651 }
1652 
1653 int32_t
1654 smb_time_local_to_gmt(smb_request_t *sr, int32_t local)
1655 {
1656 	if ((local == 0) || (local == -1))
1657 		return (0);
1658 
1659 	return (local + sr->sr_gmtoff);
1660 }
1661 
1662 
1663 /*
1664  * smb_time_dos_to_unix
1665  *
1666  * Convert SMB_DATE & SMB_TIME values to a unix timestamp.
1667  *
1668  * A date/time field of 0 means that that server file system
1669  * assigned value need not be changed. The behaviour when the
1670  * date/time field is set to -1 is not documented but is
1671  * generally treated like 0.
1672  * If date or time is 0 or -1 the unix time is returned as 0
1673  * so that the caller can identify and handle this special case.
1674  */
1675 int32_t
1676 smb_time_dos_to_unix(int16_t date, int16_t time)
1677 {
1678 	struct tm	atm;
1679 
1680 	if (((date == 0) || (time == 0)) ||
1681 	    ((date == -1) || (time == -1))) {
1682 		return (0);
1683 	}
1684 
1685 	atm.tm_year = ((date >>  9) & 0x3F) + 80;
1686 	atm.tm_mon  = ((date >>  5) & 0x0F) - 1;
1687 	atm.tm_mday = ((date >>  0) & 0x1F);
1688 	atm.tm_hour = ((time >> 11) & 0x1F);
1689 	atm.tm_min  = ((time >>  5) & 0x3F);
1690 	atm.tm_sec  = ((time >>  0) & 0x1F) << 1;
1691 
1692 	return (smb_timegm(&atm));
1693 }
1694 
1695 void
1696 smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p)
1697 {
1698 	struct tm	atm;
1699 	int		i;
1700 	time_t		tmp_time;
1701 
1702 	if (ux_time == 0) {
1703 		*date_p = 0;
1704 		*time_p = 0;
1705 		return;
1706 	}
1707 
1708 	tmp_time = (time_t)ux_time;
1709 	(void) smb_gmtime_r(&tmp_time, &atm);
1710 
1711 	if (date_p) {
1712 		i = 0;
1713 		i += atm.tm_year - 80;
1714 		i <<= 4;
1715 		i += atm.tm_mon + 1;
1716 		i <<= 5;
1717 		i += atm.tm_mday;
1718 
1719 		*date_p = (short)i;
1720 	}
1721 	if (time_p) {
1722 		i = 0;
1723 		i += atm.tm_hour;
1724 		i <<= 6;
1725 		i += atm.tm_min;
1726 		i <<= 5;
1727 		i += atm.tm_sec >> 1;
1728 
1729 		*time_p = (short)i;
1730 	}
1731 }
1732 
1733 
1734 /*
1735  * smb_gmtime_r
1736  *
1737  * Thread-safe version of smb_gmtime. Returns a null pointer if either
1738  * input parameter is a null pointer. Otherwise returns a pointer
1739  * to result.
1740  *
1741  * Day of the week calculation: the Epoch was a thursday.
1742  *
1743  * There are no timezone corrections so tm_isdst and tm_gmtoff are
1744  * always zero, and the zone is always WET.
1745  */
1746 struct tm *
1747 smb_gmtime_r(time_t *clock, struct tm *result)
1748 {
1749 	time_t tsec;
1750 	int year;
1751 	int month;
1752 	int sec_per_month;
1753 
1754 	if (clock == 0 || result == 0)
1755 		return (0);
1756 
1757 	bzero(result, sizeof (struct tm));
1758 	tsec = *clock;
1759 	tsec -= tzh_leapcnt;
1760 
1761 	result->tm_wday = tsec / SECSPERDAY;
1762 	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;
1763 
1764 	year = EPOCH_YEAR;
1765 	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
1766 	    (SECSPERDAY * DAYSPERNYEAR))) {
1767 		if (isleap(year))
1768 			tsec -= SECSPERDAY * DAYSPERLYEAR;
1769 		else
1770 			tsec -= SECSPERDAY * DAYSPERNYEAR;
1771 
1772 		++year;
1773 	}
1774 
1775 	result->tm_year = year - TM_YEAR_BASE;
1776 	result->tm_yday = tsec / SECSPERDAY;
1777 
1778 	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
1779 		sec_per_month = days_in_month[month] * SECSPERDAY;
1780 
1781 		if (month == TM_FEBRUARY && isleap(year))
1782 			sec_per_month += SECSPERDAY;
1783 
1784 		if (tsec < sec_per_month)
1785 			break;
1786 
1787 		tsec -= sec_per_month;
1788 	}
1789 
1790 	result->tm_mon = month;
1791 	result->tm_mday = (tsec / SECSPERDAY) + 1;
1792 	tsec %= SECSPERDAY;
1793 	result->tm_sec = tsec % 60;
1794 	tsec /= 60;
1795 	result->tm_min = tsec % 60;
1796 	tsec /= 60;
1797 	result->tm_hour = (int)tsec;
1798 
1799 	return (result);
1800 }
1801 
1802 
1803 /*
1804  * smb_timegm
1805  *
1806  * Converts the broken-down time in tm to a time value, i.e. the number
1807  * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
1808  * not a POSIX or ANSI function. Per the man page, the input values of
1809  * tm_wday and tm_yday are ignored and, as the input data is assumed to
1810  * represent GMT, we force tm_isdst and tm_gmtoff to 0.
1811  *
1812  * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
1813  * and tm_yday, and bring the other fields within normal range. I don't
1814  * think this is really how it should be done but it's convenient for
1815  * now.
1816  */
1817 time_t
1818 smb_timegm(struct tm *tm)
1819 {
1820 	time_t tsec;
1821 	int dd;
1822 	int mm;
1823 	int yy;
1824 	int year;
1825 
1826 	if (tm == 0)
1827 		return (-1);
1828 
1829 	year = tm->tm_year + TM_YEAR_BASE;
1830 	tsec = tzh_leapcnt;
1831 
1832 	for (yy = EPOCH_YEAR; yy < year; ++yy) {
1833 		if (isleap(yy))
1834 			tsec += SECSPERDAY * DAYSPERLYEAR;
1835 		else
1836 			tsec += SECSPERDAY * DAYSPERNYEAR;
1837 	}
1838 
1839 	for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
1840 		dd = days_in_month[mm] * SECSPERDAY;
1841 
1842 		if (mm == TM_FEBRUARY && isleap(year))
1843 			dd += SECSPERDAY;
1844 
1845 		tsec += dd;
1846 	}
1847 
1848 	tsec += (tm->tm_mday - 1) * SECSPERDAY;
1849 	tsec += tm->tm_sec;
1850 	tsec += tm->tm_min * SECSPERMIN;
1851 	tsec += tm->tm_hour * SECSPERHOUR;
1852 
1853 	tm->tm_isdst = 0;
1854 	(void) smb_gmtime_r(&tsec, tm);
1855 	return (tsec);
1856 }
1857 
1858 /*
1859  * smb_cred_set_sid
1860  *
1861  * Initialize the ksid based on the given smb_id_t.
1862  */
1863 static void
1864 smb_cred_set_sid(smb_id_t *id, ksid_t *ksid)
1865 {
1866 	char sidstr[SMB_SID_STRSZ];
1867 	int rc;
1868 
1869 	ASSERT(id);
1870 	ASSERT(id->i_sid);
1871 
1872 	ksid->ks_id = id->i_id;
1873 	smb_sid_tostr(id->i_sid, sidstr);
1874 	rc = smb_sid_splitstr(sidstr, &ksid->ks_rid);
1875 	ASSERT(rc == 0);
1876 
1877 	ksid->ks_attr = id->i_attrs;
1878 	ksid->ks_domain = ksid_lookupdomain(sidstr);
1879 }
1880 
1881 /*
1882  * smb_cred_set_sidlist
1883  *
1884  * Allocate and initialize the ksidlist based on the Windows group list of the
1885  * access token.
1886  */
1887 static ksidlist_t *
1888 smb_cred_set_sidlist(smb_ids_t *token_grps)
1889 {
1890 	int i;
1891 	ksidlist_t *lp;
1892 
1893 	lp = kmem_zalloc(KSIDLIST_MEM(token_grps->i_cnt), KM_SLEEP);
1894 	lp->ksl_ref = 1;
1895 	lp->ksl_nsid = token_grps->i_cnt;
1896 	lp->ksl_neid = 0;
1897 
1898 	for (i = 0; i < lp->ksl_nsid; i++) {
1899 		smb_cred_set_sid(&token_grps->i_ids[i], &lp->ksl_sids[i]);
1900 		if (lp->ksl_sids[i].ks_id > IDMAP_WK__MAX_GID)
1901 			lp->ksl_neid++;
1902 	}
1903 
1904 	return (lp);
1905 }
1906 
1907 /*
1908  * A Solaris credential (cred_t structure) will be allocated and
1909  * initialized based on the given Windows style user access token.
1910  *
1911  * cred's gid is set to the primary group of the mapped Solaris user.
1912  * When there is no such mapped user (i.e. the mapped UID is ephemeral)
1913  * or his/her primary group could not be obtained, cred's gid is set to
1914  * the mapped Solaris group of token's primary group.
1915  */
1916 cred_t *
1917 smb_cred_create(smb_token_t *token, uint32_t *privileges)
1918 {
1919 	ksid_t			ksid;
1920 	ksidlist_t		*ksidlist = NULL;
1921 	smb_posix_grps_t	*posix_grps;
1922 	cred_t			*cr;
1923 	gid_t			gid;
1924 
1925 	ASSERT(token);
1926 	ASSERT(token->tkn_posix_grps);
1927 	posix_grps = token->tkn_posix_grps;
1928 
1929 	ASSERT(privileges);
1930 
1931 	cr = crget();
1932 	ASSERT(cr != NULL);
1933 
1934 	if (!IDMAP_ID_IS_EPHEMERAL(token->tkn_user.i_id) &&
1935 	    (posix_grps->pg_ngrps != 0)) {
1936 		gid = posix_grps->pg_grps[0];
1937 	} else {
1938 		gid = token->tkn_primary_grp.i_id;
1939 	}
1940 
1941 	if (crsetugid(cr, token->tkn_user.i_id, gid) != 0) {
1942 		crfree(cr);
1943 		return (NULL);
1944 	}
1945 
1946 	if (crsetgroups(cr, posix_grps->pg_ngrps, posix_grps->pg_grps) != 0) {
1947 		crfree(cr);
1948 		return (NULL);
1949 	}
1950 
1951 	smb_cred_set_sid(&token->tkn_user, &ksid);
1952 	crsetsid(cr, &ksid, KSID_USER);
1953 	smb_cred_set_sid(&token->tkn_primary_grp, &ksid);
1954 	crsetsid(cr, &ksid, KSID_GROUP);
1955 	smb_cred_set_sid(&token->tkn_owner, &ksid);
1956 	crsetsid(cr, &ksid, KSID_OWNER);
1957 	ksidlist = smb_cred_set_sidlist(&token->tkn_win_grps);
1958 	crsetsidlist(cr, ksidlist);
1959 
1960 	*privileges = 0;
1961 
1962 	if (smb_token_query_privilege(token, SE_BACKUP_LUID))
1963 		*privileges |= SMB_USER_PRIV_BACKUP;
1964 
1965 	if (smb_token_query_privilege(token, SE_RESTORE_LUID))
1966 		*privileges |= SMB_USER_PRIV_RESTORE;
1967 
1968 	if (smb_token_query_privilege(token, SE_TAKE_OWNERSHIP_LUID)) {
1969 		*privileges |= SMB_USER_PRIV_TAKE_OWNERSHIP;
1970 		(void) crsetpriv(cr, PRIV_FILE_CHOWN, NULL);
1971 	}
1972 
1973 	if (smb_token_query_privilege(token, SE_SECURITY_LUID))
1974 		*privileges |= SMB_USER_PRIV_SECURITY;
1975 
1976 	return (cr);
1977 }
1978 
1979 /*
1980  * smb_cred_rele
1981  *
1982  * The reference count of the user's credential will get decremented if it
1983  * is non-zero. Otherwise, the credential will be freed.
1984  */
1985 void
1986 smb_cred_rele(cred_t *cr)
1987 {
1988 	ASSERT(cr);
1989 	crfree(cr);
1990 }
1991 
1992 /*
1993  * smb_cred_is_member
1994  *
1995  * Same as smb_token_is_member. The only difference is that
1996  * we compare the given SID against user SID and the ksidlist
1997  * of the user's cred.
1998  */
1999 int
2000 smb_cred_is_member(cred_t *cr, smb_sid_t *sid)
2001 {
2002 	ksidlist_t *ksidlist;
2003 	ksid_t ksid1, *ksid2;
2004 	smb_id_t id;
2005 	int i, rc = 0;
2006 
2007 	ASSERT(cr);
2008 
2009 	bzero(&id, sizeof (smb_id_t));
2010 	id.i_sid = sid;
2011 	smb_cred_set_sid(&id, &ksid1);
2012 
2013 	ksidlist = crgetsidlist(cr);
2014 	ASSERT(ksidlist);
2015 	ASSERT(ksid1.ks_domain);
2016 	ASSERT(ksid1.ks_domain->kd_name);
2017 
2018 	i = 0;
2019 	ksid2 = crgetsid(cr, KSID_USER);
2020 	do {
2021 		ASSERT(ksid2->ks_domain);
2022 		ASSERT(ksid2->ks_domain->kd_name);
2023 
2024 		if (strcmp(ksid1.ks_domain->kd_name,
2025 		    ksid2->ks_domain->kd_name) == 0 &&
2026 		    ksid1.ks_rid == ksid2->ks_rid) {
2027 			rc = 1;
2028 			break;
2029 		}
2030 
2031 		ksid2 = &ksidlist->ksl_sids[i];
2032 	} while (i++ < ksidlist->ksl_nsid);
2033 
2034 	ksid_rele(&ksid1);
2035 	return (rc);
2036 }
2037 
2038 /*
2039  * smb_cred_create_privs
2040  *
2041  * Creates a duplicate credential that contains system privileges for
2042  * certain SMB privileges: Backup and Restore.
2043  *
2044  */
2045 cred_t *
2046 smb_cred_create_privs(cred_t *user_cr, uint32_t privileges)
2047 {
2048 	cred_t *cr = NULL;
2049 
2050 	ASSERT(user_cr != NULL);
2051 
2052 	if (privileges & (SMB_USER_PRIV_BACKUP | SMB_USER_PRIV_RESTORE))
2053 		cr = crdup(user_cr);
2054 
2055 	if (cr == NULL)
2056 		return (NULL);
2057 
2058 	if (privileges & SMB_USER_PRIV_BACKUP) {
2059 		(void) crsetpriv(cr, PRIV_FILE_DAC_READ,
2060 		    PRIV_FILE_DAC_SEARCH, PRIV_SYS_MOUNT, NULL);
2061 	}
2062 
2063 	if (privileges & SMB_USER_PRIV_RESTORE) {
2064 		(void) crsetpriv(cr, PRIV_FILE_DAC_WRITE,
2065 		    PRIV_FILE_CHOWN, PRIV_FILE_CHOWN_SELF,
2066 		    PRIV_FILE_DAC_SEARCH, PRIV_FILE_LINK_ANY,
2067 		    PRIV_FILE_OWNER, PRIV_FILE_SETID, PRIV_SYS_LINKDIR,
2068 		    PRIV_SYS_MOUNT, NULL);
2069 	}
2070 
2071 	return (cr);
2072 }
2073 
2074 /*
2075  * smb_pad_align
2076  *
2077  * Returns the number of bytes required to pad an offset to the
2078  * specified alignment.
2079  */
2080 uint32_t
2081 smb_pad_align(uint32_t offset, uint32_t align)
2082 {
2083 	uint32_t pad = offset % align;
2084 
2085 	if (pad != 0)
2086 		pad = align - pad;
2087 
2088 	return (pad);
2089 }
2090 
2091 /*
2092  * smb_panic
2093  *
2094  * Logs the file name, function name and line number passed in and panics the
2095  * system.
2096  */
2097 void
2098 smb_panic(char *file, const char *func, int line)
2099 {
2100 	cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
2101 }
2102