xref: /titanic_50/usr/src/uts/common/fs/smbsrv/smb_kutil.c (revision 1e67f0f0096be223aa7f51802953bebd95866ddc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/param.h>
27 #include <sys/types.h>
28 #include <sys/tzfile.h>
29 #include <sys/atomic.h>
30 #include <sys/kidmap.h>
31 #include <sys/time.h>
32 #include <sys/cpuvar.h>
33 #include <smbsrv/smb_kproto.h>
34 #include <smbsrv/smb_fsops.h>
35 #include <smbsrv/smbinfo.h>
36 #include <smbsrv/smb_xdr.h>
37 #include <smbsrv/smb_vops.h>
38 #include <smbsrv/smb_idmap.h>
39 
40 #include <sys/sid.h>
41 #include <sys/priv_names.h>
42 
43 static kmem_cache_t	*smb_dtor_cache;
44 static boolean_t	smb_llist_initialized = B_FALSE;
45 
46 static void smb_llist_flush(smb_llist_t *);
47 static boolean_t smb_thread_continue_timedwait_locked(smb_thread_t *, int);
48 
49 time_t tzh_leapcnt = 0;
50 
51 struct tm
52 *smb_gmtime_r(time_t *clock, struct tm *result);
53 
54 time_t
55 smb_timegm(struct tm *tm);
56 
57 struct	tm {
58 	int	tm_sec;
59 	int	tm_min;
60 	int	tm_hour;
61 	int	tm_mday;
62 	int	tm_mon;
63 	int	tm_year;
64 	int	tm_wday;
65 	int	tm_yday;
66 	int	tm_isdst;
67 };
68 
69 static int days_in_month[] = {
70 	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
71 };
72 
73 int
74 smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
75 {
76 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
77 		return (smb_wcequiv_strlen(str));
78 	return (strlen(str));
79 }
80 
81 int
82 smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
83 {
84 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
85 		return (smb_wcequiv_strlen(str) + 2);
86 	return (strlen(str) + 1);
87 }
88 
89 int
90 smb_ascii_or_unicode_null_len(struct smb_request *sr)
91 {
92 	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
93 		return (2);
94 	return (1);
95 }
96 
97 /*
98  * Return B_TRUE if pattern contains wildcards
99  */
100 boolean_t
101 smb_contains_wildcards(const char *pattern)
102 {
103 	static const char *wildcards = "*?";
104 
105 	return (strpbrk(pattern, wildcards) != NULL);
106 }
107 
108 /*
109  * When converting wildcards a '.' in a name is treated as a base and
110  * extension separator even if the name is longer than 8.3.
111  *
112  * The '*' character matches an entire part of the name.  For example,
113  * "*.abc" matches any name with an extension of "abc".
114  *
115  * The '?' character matches a single character.
116  * If the base contains all ? (8 or more) then it is treated as *.
117  * If the extension contains all ? (3 or more) then it is treated as *.
118  *
119  * Clients convert ASCII wildcards to Unicode wildcards as follows:
120  *
121  *	? is converted to >
122  *	. is converted to " if it is followed by ? or *
123  *	* is converted to < if it is followed by .
124  *
125  * Note that clients convert "*." to '<' and drop the '.' but "*.txt"
126  * is sent as "<.TXT", i.e.
127  *
128  * 	dir *.		->	dir <
129  * 	dir *.txt	->	dir <.TXT
130  *
131  * Since " and < are illegal in Windows file names, we always convert
132  * these Unicode wildcards without checking the following character.
133  */
134 void
135 smb_convert_wildcards(char *pattern)
136 {
137 	static char *match_all[] = {
138 		"*.",
139 		"*.*"
140 	};
141 	char	*extension;
142 	char	*p;
143 	int	len;
144 	int	i;
145 
146 	/*
147 	 * Special case "<" for "dir *.", and fast-track for "*".
148 	 */
149 	if ((*pattern == '<') || (*pattern == '*')) {
150 		if (*(pattern + 1) == '\0') {
151 			*pattern = '*';
152 			return;
153 		}
154 	}
155 
156 	for (p = pattern; *p != '\0'; ++p) {
157 		switch (*p) {
158 		case '<':
159 			*p = '*';
160 			break;
161 		case '>':
162 			*p = '?';
163 			break;
164 		case '\"':
165 			*p = '.';
166 			break;
167 		default:
168 			break;
169 		}
170 	}
171 
172 	/*
173 	 * Replace "????????.ext" with "*.ext".
174 	 */
175 	p = pattern;
176 	p += strspn(p, "?");
177 	if (*p == '.') {
178 		*p = '\0';
179 		len = strlen(pattern);
180 		*p = '.';
181 		if (len >= SMB_NAME83_BASELEN) {
182 			*pattern = '*';
183 			(void) strlcpy(pattern + 1, p, MAXPATHLEN - 1);
184 		}
185 	}
186 
187 	/*
188 	 * Replace "base.???" with 'base.*'.
189 	 */
190 	if ((extension = strrchr(pattern, '.')) != NULL) {
191 		p = ++extension;
192 		p += strspn(p, "?");
193 		if (*p == '\0') {
194 			len = strlen(extension);
195 			if (len >= SMB_NAME83_EXTLEN) {
196 				*extension = '\0';
197 				(void) strlcat(pattern, "*", MAXPATHLEN);
198 			}
199 		}
200 	}
201 
202 	/*
203 	 * Replace anything that matches an entry in match_all with "*".
204 	 */
205 	for (i = 0; i < sizeof (match_all) / sizeof (match_all[0]); ++i) {
206 		if (strcmp(pattern, match_all[i]) == 0) {
207 			(void) strlcpy(pattern, "*", MAXPATHLEN);
208 			break;
209 		}
210 	}
211 }
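
/*
 * A few worked examples of the conversions performed by
 * smb_convert_wildcards() above (for illustration only):
 *
 *	"<"		->	"*"
 *	"<.TXT"		->	"*.TXT"
 *	"????????.ext"	->	"*.ext"
 *	"base.???"	->	"base.*"
 *	"*.*"		->	"*"
 */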
212 
213 /*
214  * smb_sattr_check
215  *
216  * Check file attributes against a search attribute (sattr) mask.
217  *
218  * Normal files, which include READONLY and ARCHIVE, always pass
219  * this check.  If the DIRECTORY, HIDDEN or SYSTEM special attributes
220  * are set on the file then they must also appear in the search mask.
221  * The special attributes are inclusive, i.e. all special attributes
222  * set on the file must also appear in sattr for the check to
223  * pass.
224  *
225  * The following examples show how this works:
226  *
227  *		fileA:	READONLY
228  *		fileB:	0 (no attributes = normal file)
229  *		fileC:	READONLY, ARCHIVE
230  *		fileD:	HIDDEN
231  *		fileE:	READONLY, HIDDEN, SYSTEM
232  *		dirA:	DIRECTORY
233  *
234  * search attribute: 0
235  *		Returns: fileA, fileB and fileC.
236  * search attribute: HIDDEN
237  *		Returns: fileA, fileB, fileC and fileD.
238  * search attribute: SYSTEM
239  *		Returns: fileA, fileB and fileC.
240  * search attribute: DIRECTORY
241  *		Returns: fileA, fileB, fileC and dirA.
242  * search attribute: HIDDEN and SYSTEM
243  *		Returns: fileA, fileB, fileC, fileD and fileE.
244  *
245  * Returns true if the file and sattr match; otherwise, returns false.
246  */
247 boolean_t
248 smb_sattr_check(uint16_t dosattr, uint16_t sattr)
249 {
250 	if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
251 	    !(sattr & FILE_ATTRIBUTE_DIRECTORY))
252 		return (B_FALSE);
253 
254 	if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
255 	    !(sattr & FILE_ATTRIBUTE_HIDDEN))
256 		return (B_FALSE);
257 
258 	if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
259 	    !(sattr & FILE_ATTRIBUTE_SYSTEM))
260 		return (B_FALSE);
261 
262 	return (B_TRUE);
263 }
264 
265 int
266 microtime(timestruc_t *tvp)
267 {
268 	tvp->tv_sec = gethrestime_sec();
269 	tvp->tv_nsec = 0;
270 	return (0);
271 }
272 
273 int32_t
274 clock_get_milli_uptime(void)
275 {
276 	return (TICK_TO_MSEC(ddi_get_lbolt()));
277 }
278 
279 int /*ARGSUSED*/
280 smb_noop(void *p, size_t size, int foo)
281 {
282 	return (0);
283 }
284 
285 /*
286  * smb_idpool_increment
287  *
288  * This function grows the ID pool by doubling its current size. This
289  * function assumes the caller entered the mutex of the pool.
290  */
291 static int
292 smb_idpool_increment(
293     smb_idpool_t	*pool)
294 {
295 	uint8_t		*new_pool;
296 	uint32_t	new_size;
297 
298 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
299 
300 	new_size = pool->id_size * 2;
301 	if (new_size <= SMB_IDPOOL_MAX_SIZE) {
302 		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
303 		if (new_pool) {
304 			bzero(new_pool, new_size / 8);
305 			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
306 			kmem_free(pool->id_pool, pool->id_size / 8);
307 			pool->id_pool = new_pool;
308 			pool->id_free_counter += new_size - pool->id_size;
309 			pool->id_max_free_counter += new_size - pool->id_size;
310 			pool->id_size = new_size;
311 			pool->id_idx_msk = (new_size / 8) - 1;
312 			if (new_size >= SMB_IDPOOL_MAX_SIZE) {
313 				/* id -1 made unavailable */
314 				pool->id_pool[pool->id_idx_msk] = 0x80;
315 				pool->id_free_counter--;
316 				pool->id_max_free_counter--;
317 			}
318 			return (0);
319 		}
320 	}
321 	return (-1);
322 }
323 
324 /*
325  * smb_idpool_constructor
326  *
327  * This function initializes the pool structure provided.
328  */
329 int
330 smb_idpool_constructor(
331     smb_idpool_t	*pool)
332 {
333 
334 	ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);
335 
336 	pool->id_size = SMB_IDPOOL_MIN_SIZE;
337 	pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
338 	pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
339 	pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
340 	pool->id_bit = 0x02;
341 	pool->id_bit_idx = 1;
342 	pool->id_idx = 0;
343 	pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
344 	    KM_SLEEP);
345 	bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
346 	/* -1 id made unavailable */
347 	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
348 	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
349 	pool->id_magic = SMB_IDPOOL_MAGIC;
350 	return (0);
351 }
352 
353 /*
354  * smb_idpool_destructor
355  *
356  * This function tears down and frees the resources associated with the
357  * pool provided.
358  */
359 void
360 smb_idpool_destructor(
361     smb_idpool_t	*pool)
362 {
363 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
364 	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
365 	pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
366 	mutex_destroy(&pool->id_mutex);
367 	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
368 }
369 
370 /*
371  * smb_idpool_alloc
372  *
373  * This function allocates an ID from the pool provided.
374  */
375 int
376 smb_idpool_alloc(
377     smb_idpool_t	*pool,
378     uint16_t		*id)
379 {
380 	uint32_t	i;
381 	uint8_t		bit;
382 	uint8_t		bit_idx;
383 	uint8_t		byte;
384 
385 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
386 
387 	mutex_enter(&pool->id_mutex);
388 	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
389 		mutex_exit(&pool->id_mutex);
390 		return (-1);
391 	}
392 
393 	i = pool->id_size;
394 	while (i) {
395 		bit = pool->id_bit;
396 		bit_idx = pool->id_bit_idx;
397 		byte = pool->id_pool[pool->id_idx];
398 		while (bit) {
399 			if (byte & bit) {
400 				bit = bit << 1;
401 				bit_idx++;
402 				continue;
403 			}
404 			pool->id_pool[pool->id_idx] |= bit;
405 			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
406 			pool->id_free_counter--;
407 			pool->id_bit = bit;
408 			pool->id_bit_idx = bit_idx;
409 			mutex_exit(&pool->id_mutex);
410 			return (0);
411 		}
412 		pool->id_bit = 1;
413 		pool->id_bit_idx = 0;
414 		pool->id_idx++;
415 		pool->id_idx &= pool->id_idx_msk;
416 		--i;
417 	}
418 	/*
419 	 * This section of code shouldn't be reached. If there are IDs
420 	 * available and none could be found there's a problem.
421 	 */
422 	ASSERT(0);
423 	mutex_exit(&pool->id_mutex);
424 	return (-1);
425 }
426 
427 /*
428  * smb_idpool_free
429  *
430  * This function frees the ID provided.
431  */
432 void
433 smb_idpool_free(
434     smb_idpool_t	*pool,
435     uint16_t		id)
436 {
437 	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
438 	ASSERT(id != 0);
439 	ASSERT(id != 0xFFFF);
440 
441 	mutex_enter(&pool->id_mutex);
442 	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
443 		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
444 		pool->id_free_counter++;
445 		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
446 		mutex_exit(&pool->id_mutex);
447 		return;
448 	}
449 	/* Freeing a free ID. */
450 	ASSERT(0);
451 	mutex_exit(&pool->id_mutex);
452 }
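
/*
 * A minimal usage sketch for the ID pool functions above (illustrative
 * only; "pool" and "id" are hypothetical locals):
 *
 *	smb_idpool_t	pool;
 *	uint16_t	id;
 *
 *	(void) smb_idpool_constructor(&pool);
 *	if (smb_idpool_alloc(&pool, &id) == 0) {
 *		...				(id is a non-zero 16-bit ID)
 *		smb_idpool_free(&pool, id);
 *	}
 *	smb_idpool_destructor(&pool);
 */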
453 
454 /*
455  * Initialize the llist delete queue object cache.
456  */
457 void
458 smb_llist_init(void)
459 {
460 	if (smb_llist_initialized)
461 		return;
462 
463 	smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
464 	    sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
465 
466 	smb_llist_initialized = B_TRUE;
467 }
468 
469 /*
470  * Destroy the llist delete queue object cache.
471  */
472 void
473 smb_llist_fini(void)
474 {
475 	if (!smb_llist_initialized)
476 		return;
477 
478 	kmem_cache_destroy(smb_dtor_cache);
479 	smb_llist_initialized = B_FALSE;
480 }
481 
482 /*
483  * smb_llist_constructor
484  *
485  * This function initializes a locked list.
486  */
487 void
488 smb_llist_constructor(
489     smb_llist_t	*ll,
490     size_t	size,
491     size_t	offset)
492 {
493 	rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
494 	mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
495 	list_create(&ll->ll_list, size, offset);
496 	list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
497 	    offsetof(smb_dtor_t, dt_lnd));
498 	ll->ll_count = 0;
499 	ll->ll_wrop = 0;
500 	ll->ll_deleteq_count = 0;
501 }
502 
503 /*
504  * Flush the delete queue and destroy a locked list.
505  */
506 void
507 smb_llist_destructor(
508     smb_llist_t	*ll)
509 {
510 	smb_llist_flush(ll);
511 
512 	ASSERT(ll->ll_count == 0);
513 	ASSERT(ll->ll_deleteq_count == 0);
514 
515 	rw_destroy(&ll->ll_lock);
516 	list_destroy(&ll->ll_list);
517 	list_destroy(&ll->ll_deleteq);
518 	mutex_destroy(&ll->ll_mutex);
519 }
520 
521 /*
522  * Post an object to the delete queue.  The delete queue will be processed
523  * during list exit or list destruction.  Objects are often posted for
524  * deletion during list iteration (while the list is locked) but that is
525  * not required, and an object can be posted at any time.
526  */
527 void
528 smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
529 {
530 	smb_dtor_t	*dtor;
531 
532 	ASSERT((object != NULL) && (dtorproc != NULL));
533 
534 	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
535 	bzero(dtor, sizeof (smb_dtor_t));
536 	dtor->dt_magic = SMB_DTOR_MAGIC;
537 	dtor->dt_object = object;
538 	dtor->dt_proc = dtorproc;
539 
540 	mutex_enter(&ll->ll_mutex);
541 	list_insert_tail(&ll->ll_deleteq, dtor);
542 	++ll->ll_deleteq_count;
543 	mutex_exit(&ll->ll_mutex);
544 }
545 
546 /*
547  * Exit the list lock and process the delete queue.
548  */
549 void
550 smb_llist_exit(smb_llist_t *ll)
551 {
552 	rw_exit(&ll->ll_lock);
553 	smb_llist_flush(ll);
554 }
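
/*
 * A sketch of the typical delete queue pattern (my_node and my_node_free
 * are hypothetical): while iterating with the list lock held, an object
 * that must go away is removed and posted rather than freed in place,
 *
 *	smb_llist_remove(ll, my_node);
 *	smb_llist_post(ll, my_node, my_node_free);
 *
 * and my_node_free() then runs from smb_llist_exit() (or from
 * smb_llist_destructor()) after the list lock has been dropped.
 */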
555 
556 /*
557  * Flush the list delete queue.  The mutex is dropped across the destructor
558  * call in case this leads to additional objects being posted to the delete
559  * queue.
560  */
561 static void
562 smb_llist_flush(smb_llist_t *ll)
563 {
564 	smb_dtor_t    *dtor;
565 
566 	mutex_enter(&ll->ll_mutex);
567 
568 	dtor = list_head(&ll->ll_deleteq);
569 	while (dtor != NULL) {
570 		SMB_DTOR_VALID(dtor);
571 		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
572 		list_remove(&ll->ll_deleteq, dtor);
573 		--ll->ll_deleteq_count;
574 		mutex_exit(&ll->ll_mutex);
575 
576 		dtor->dt_proc(dtor->dt_object);
577 
578 		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
579 		kmem_cache_free(smb_dtor_cache, dtor);
580 		mutex_enter(&ll->ll_mutex);
581 		dtor = list_head(&ll->ll_deleteq);
582 	}
583 
584 	mutex_exit(&ll->ll_mutex);
585 }
586 
587 /*
588  * smb_llist_upgrade
589  *
590  * This function tries to upgrade the lock of the locked list. It assumes the
591  * lock has already been entered in RW_READER mode. It first tries using the
592  * Solaris function rw_tryupgrade(). If that call fails the lock is released
593  * and reentered in RW_WRITER mode. In that last case a window is opened during
594  * which the contents of the list may have changed. The return code indicates
595  * whether or not the list was modified when the lock was exited.
596  */
597 int smb_llist_upgrade(
598     smb_llist_t *ll)
599 {
600 	uint64_t	wrop;
601 
602 	if (rw_tryupgrade(&ll->ll_lock) != 0) {
603 		return (0);
604 	}
605 	wrop = ll->ll_wrop;
606 	rw_exit(&ll->ll_lock);
607 	rw_enter(&ll->ll_lock, RW_WRITER);
608 	return (wrop != ll->ll_wrop);
609 }
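
/*
 * A caller of smb_llist_upgrade() should re-validate any state derived
 * from the list when a non-zero return indicates the lock was dropped,
 * for example (sketch only):
 *
 *	if (smb_llist_upgrade(ll) != 0) {
 *		...	(the list may have changed; look the item up again)
 *	}
 */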
610 
611 /*
612  * smb_llist_insert_head
613  *
614  * This function inserts the object passed at the beginning of the list. This
615  * function assumes the lock of the list has already been entered.
616  */
617 void
618 smb_llist_insert_head(
619     smb_llist_t	*ll,
620     void	*obj)
621 {
622 	list_insert_head(&ll->ll_list, obj);
623 	++ll->ll_wrop;
624 	++ll->ll_count;
625 }
626 
627 /*
628  * smb_llist_insert_tail
629  *
630  * This function appends the object passed to the list. This function assumes
631  * the lock of the list has already been entered.
632  *
633  */
634 void
635 smb_llist_insert_tail(
636     smb_llist_t	*ll,
637     void	*obj)
638 {
639 	list_insert_tail(&ll->ll_list, obj);
640 	++ll->ll_wrop;
641 	++ll->ll_count;
642 }
643 
644 /*
645  * smb_llist_remove
646  *
647  * This function removes the object passed from the list. This function assumes
648  * the lock of the list has already been entered.
649  */
650 void
651 smb_llist_remove(
652     smb_llist_t	*ll,
653     void	*obj)
654 {
655 	list_remove(&ll->ll_list, obj);
656 	++ll->ll_wrop;
657 	--ll->ll_count;
658 }
659 
660 /*
661  * smb_llist_get_count
662  *
663  * This function returns the number of elements in the specified list.
664  */
665 uint32_t
666 smb_llist_get_count(
667     smb_llist_t *ll)
668 {
669 	return (ll->ll_count);
670 }
671 
672 /*
673  * smb_slist_constructor
674  *
675  * Synchronized list constructor.
676  */
677 void
678 smb_slist_constructor(
679     smb_slist_t	*sl,
680     size_t	size,
681     size_t	offset)
682 {
683 	mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
684 	cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
685 	list_create(&sl->sl_list, size, offset);
686 	sl->sl_count = 0;
687 	sl->sl_waiting = B_FALSE;
688 }
689 
690 /*
691  * smb_slist_destructor
692  *
693  * Synchronized list destructor.
694  */
695 void
696 smb_slist_destructor(
697     smb_slist_t	*sl)
698 {
699 	ASSERT(sl->sl_count == 0);
700 
701 	mutex_destroy(&sl->sl_mutex);
702 	cv_destroy(&sl->sl_cv);
703 	list_destroy(&sl->sl_list);
704 }
705 
706 /*
707  * smb_slist_insert_head
708  *
709  * This function inserts the object passed at the beginning of the list.
710  */
711 void
712 smb_slist_insert_head(
713     smb_slist_t	*sl,
714     void	*obj)
715 {
716 	mutex_enter(&sl->sl_mutex);
717 	list_insert_head(&sl->sl_list, obj);
718 	++sl->sl_count;
719 	mutex_exit(&sl->sl_mutex);
720 }
721 
722 /*
723  * smb_slist_insert_tail
724  *
725  * This function appends the object passed to the list.
726  */
727 void
728 smb_slist_insert_tail(
729     smb_slist_t	*sl,
730     void	*obj)
731 {
732 	mutex_enter(&sl->sl_mutex);
733 	list_insert_tail(&sl->sl_list, obj);
734 	++sl->sl_count;
735 	mutex_exit(&sl->sl_mutex);
736 }
737 
738 /*
739  * smb_slist_remove
740  *
741  * This function removes the object passed by the caller from the list.
742  */
743 void
744 smb_slist_remove(
745     smb_slist_t	*sl,
746     void	*obj)
747 {
748 	mutex_enter(&sl->sl_mutex);
749 	list_remove(&sl->sl_list, obj);
750 	if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
751 		sl->sl_waiting = B_FALSE;
752 		cv_broadcast(&sl->sl_cv);
753 	}
754 	mutex_exit(&sl->sl_mutex);
755 }
756 
757 /*
758  * smb_slist_move_tail
759  *
760  * This function transfers all the contents of the synchronized list to the
761  * list_t provided. It returns the number of objects transferred.
762  */
763 uint32_t
764 smb_slist_move_tail(
765     list_t	*lst,
766     smb_slist_t	*sl)
767 {
768 	uint32_t	rv;
769 
770 	mutex_enter(&sl->sl_mutex);
771 	rv = sl->sl_count;
772 	if (sl->sl_count) {
773 		list_move_tail(lst, &sl->sl_list);
774 		sl->sl_count = 0;
775 		if (sl->sl_waiting) {
776 			sl->sl_waiting = B_FALSE;
777 			cv_broadcast(&sl->sl_cv);
778 		}
779 	}
780 	mutex_exit(&sl->sl_mutex);
781 	return (rv);
782 }
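
/*
 * smb_slist_move_tail() supports a simple hand-off pattern (sketch only;
 * my_work_t, w_lnd and wq are hypothetical):
 *
 *	list_t	local;
 *
 *	list_create(&local, sizeof (my_work_t), offsetof(my_work_t, w_lnd));
 *	if (smb_slist_move_tail(&local, wq) != 0) {
 *		...	(drain "local" without holding the slist mutex)
 *	}
 *	list_destroy(&local);
 */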
783 
784 /*
785  * smb_slist_obj_move
786  *
787  * This function moves an object from one list to the end of the other list. It
788  * assumes the mutex of each list has been entered.
789  */
790 void
791 smb_slist_obj_move(
792     smb_slist_t	*dst,
793     smb_slist_t	*src,
794     void	*obj)
795 {
796 	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
797 	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);
798 
799 	list_remove(&src->sl_list, obj);
800 	list_insert_tail(&dst->sl_list, obj);
801 	dst->sl_count++;
802 	src->sl_count--;
803 	if ((src->sl_count == 0) && (src->sl_waiting)) {
804 		src->sl_waiting = B_FALSE;
805 		cv_broadcast(&src->sl_cv);
806 	}
807 }
808 
809 /*
810  * smb_slist_wait_for_empty
811  *
812  * This function waits for a list to be emptied.
813  */
814 void
815 smb_slist_wait_for_empty(
816     smb_slist_t	*sl)
817 {
818 	mutex_enter(&sl->sl_mutex);
819 	while (sl->sl_count) {
820 		sl->sl_waiting = B_TRUE;
821 		cv_wait(&sl->sl_cv, &sl->sl_mutex);
822 	}
823 	mutex_exit(&sl->sl_mutex);
824 }
825 
826 /*
827  * smb_slist_exit
828  *
829  * This function exits the mutex of the list and signals the condition variable
830  * if the list is empty.
831  */
832 void
833 smb_slist_exit(smb_slist_t *sl)
834 {
835 	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
836 		sl->sl_waiting = B_FALSE;
837 		cv_broadcast(&sl->sl_cv);
838 	}
839 	mutex_exit(&sl->sl_mutex);
840 }
841 
842 /*
843  * smb_thread_entry_point
844  *
845  * Common entry point for all the threads created through smb_thread_start.
846  * The state of the thread is set to "running" at the beginning and moved to
847  * "exiting" just before calling thread_exit(). The condition variable is
848  * also signaled.
849  */
850 static void
851 smb_thread_entry_point(
852     smb_thread_t	*thread)
853 {
854 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
855 	mutex_enter(&thread->sth_mtx);
856 	ASSERT(thread->sth_state == SMB_THREAD_STATE_STARTING);
857 	thread->sth_th = curthread;
858 	thread->sth_did = thread->sth_th->t_did;
859 
860 	if (!thread->sth_kill) {
861 		thread->sth_state = SMB_THREAD_STATE_RUNNING;
862 		cv_signal(&thread->sth_cv);
863 		mutex_exit(&thread->sth_mtx);
864 		thread->sth_ep(thread, thread->sth_ep_arg);
865 		mutex_enter(&thread->sth_mtx);
866 	}
867 	thread->sth_th = NULL;
868 	thread->sth_state = SMB_THREAD_STATE_EXITING;
869 	cv_broadcast(&thread->sth_cv);
870 	mutex_exit(&thread->sth_mtx);
871 	thread_exit();
872 }
873 
874 /*
875  * smb_thread_init
876  */
877 void
878 smb_thread_init(
879     smb_thread_t	*thread,
880     char		*name,
881     smb_thread_ep_t	ep,
882     void		*ep_arg,
883     smb_thread_aw_t	aw,
884     void		*aw_arg)
885 {
886 	ASSERT(thread->sth_magic != SMB_THREAD_MAGIC);
887 
888 	bzero(thread, sizeof (*thread));
889 
890 	(void) strlcpy(thread->sth_name, name, sizeof (thread->sth_name));
891 	thread->sth_ep = ep;
892 	thread->sth_ep_arg = ep_arg;
893 	thread->sth_aw = aw;
894 	thread->sth_aw_arg = aw_arg;
895 	thread->sth_state = SMB_THREAD_STATE_EXITED;
896 	mutex_init(&thread->sth_mtx, NULL, MUTEX_DEFAULT, NULL);
897 	cv_init(&thread->sth_cv, NULL, CV_DEFAULT, NULL);
898 	thread->sth_magic = SMB_THREAD_MAGIC;
899 }
900 
901 /*
902  * smb_thread_destroy
903  */
904 void
905 smb_thread_destroy(
906     smb_thread_t	*thread)
907 {
908 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
909 	ASSERT(thread->sth_state == SMB_THREAD_STATE_EXITED);
910 	thread->sth_magic = 0;
911 	mutex_destroy(&thread->sth_mtx);
912 	cv_destroy(&thread->sth_cv);
913 }
914 
915 /*
916  * smb_thread_start
917  *
918  * This function starts a thread with the parameters provided. It waits until
919  * the state of the thread has been moved to running.
920  */
921 /*ARGSUSED*/
922 int
923 smb_thread_start(
924     smb_thread_t	*thread)
925 {
926 	int		rc = 0;
927 	kthread_t	*tmpthread;
928 
929 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
930 
931 	mutex_enter(&thread->sth_mtx);
932 	switch (thread->sth_state) {
933 	case SMB_THREAD_STATE_EXITED:
934 		thread->sth_state = SMB_THREAD_STATE_STARTING;
935 		mutex_exit(&thread->sth_mtx);
936 		tmpthread = thread_create(NULL, 0, smb_thread_entry_point,
937 		    thread, 0, &p0, TS_RUN, minclsyspri);
938 		ASSERT(tmpthread != NULL);
939 		mutex_enter(&thread->sth_mtx);
940 		while (thread->sth_state == SMB_THREAD_STATE_STARTING)
941 			cv_wait(&thread->sth_cv, &thread->sth_mtx);
942 		if (thread->sth_state != SMB_THREAD_STATE_RUNNING)
943 			rc = -1;
944 		break;
945 	default:
946 		ASSERT(0);
947 		rc = -1;
948 		break;
949 	}
950 	mutex_exit(&thread->sth_mtx);
951 	return (rc);
952 }
953 
954 /*
955  * smb_thread_stop
956  *
957  * This function signals a thread to kill itself and waits until the "exiting"
958  * state has been reached.
959  */
960 void
961 smb_thread_stop(
962     smb_thread_t	*thread)
963 {
964 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
965 
966 	mutex_enter(&thread->sth_mtx);
967 	switch (thread->sth_state) {
968 	case SMB_THREAD_STATE_RUNNING:
969 	case SMB_THREAD_STATE_STARTING:
970 		if (!thread->sth_kill) {
971 			thread->sth_kill = B_TRUE;
972 			if (thread->sth_aw)
973 				thread->sth_aw(thread, thread->sth_aw_arg);
974 			cv_broadcast(&thread->sth_cv);
975 			while (thread->sth_state != SMB_THREAD_STATE_EXITING)
976 				cv_wait(&thread->sth_cv, &thread->sth_mtx);
977 			mutex_exit(&thread->sth_mtx);
978 			thread_join(thread->sth_did);
979 			mutex_enter(&thread->sth_mtx);
980 			thread->sth_state = SMB_THREAD_STATE_EXITED;
981 			thread->sth_did = 0;
982 			thread->sth_kill = B_FALSE;
983 			cv_broadcast(&thread->sth_cv);
984 			break;
985 		}
986 		/*FALLTHRU*/
987 
988 	case SMB_THREAD_STATE_EXITING:
989 		if (thread->sth_kill) {
990 			while (thread->sth_state != SMB_THREAD_STATE_EXITED)
991 				cv_wait(&thread->sth_cv, &thread->sth_mtx);
992 		} else {
993 			thread->sth_state = SMB_THREAD_STATE_EXITED;
994 			thread->sth_did = 0;
995 		}
996 		break;
997 
998 	case SMB_THREAD_STATE_EXITED:
999 		break;
1000 
1001 	default:
1002 		ASSERT(0);
1003 		break;
1004 	}
1005 	mutex_exit(&thread->sth_mtx);
1006 }
1007 
1008 /*
1009  * smb_thread_signal
1010  *
1011  * This function signals a thread.
1012  */
1013 void
1014 smb_thread_signal(
1015     smb_thread_t	*thread)
1016 {
1017 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1018 
1019 	mutex_enter(&thread->sth_mtx);
1020 	switch (thread->sth_state) {
1021 	case SMB_THREAD_STATE_RUNNING:
1022 		if (thread->sth_aw)
1023 			thread->sth_aw(thread, thread->sth_aw_arg);
1024 		cv_signal(&thread->sth_cv);
1025 		break;
1026 
1027 	default:
1028 		break;
1029 	}
1030 	mutex_exit(&thread->sth_mtx);
1031 }
1032 
1033 boolean_t
1034 smb_thread_continue(smb_thread_t *thread)
1035 {
1036 	boolean_t result;
1037 
1038 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1039 
1040 	mutex_enter(&thread->sth_mtx);
1041 	result = smb_thread_continue_timedwait_locked(thread, 0);
1042 	mutex_exit(&thread->sth_mtx);
1043 
1044 	return (result);
1045 }
1046 
1047 boolean_t
1048 smb_thread_continue_nowait(smb_thread_t *thread)
1049 {
1050 	boolean_t result;
1051 
1052 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1053 
1054 	mutex_enter(&thread->sth_mtx);
1055 	/*
1056 	 * Setting ticks=-1 requests a non-blocking check.  We will
1057 	 * still block if the thread is in "suspend" state.
1058 	 */
1059 	result = smb_thread_continue_timedwait_locked(thread, -1);
1060 	mutex_exit(&thread->sth_mtx);
1061 
1062 	return (result);
1063 }
1064 
1065 boolean_t
1066 smb_thread_continue_timedwait(smb_thread_t *thread, int seconds)
1067 {
1068 	boolean_t result;
1069 
1070 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1071 
1072 	mutex_enter(&thread->sth_mtx);
1073 	result = smb_thread_continue_timedwait_locked(thread,
1074 	    SEC_TO_TICK(seconds));
1075 	mutex_exit(&thread->sth_mtx);
1076 
1077 	return (result);
1078 }
1079 
1080 /*
1081  * smb_thread_continue_timedwait_locked
1082  *
1083  * Internal only.  ticks == -1 means don't block; ticks == 0 means wait
1084  * indefinitely.
1085  */
1086 static boolean_t
1087 smb_thread_continue_timedwait_locked(smb_thread_t *thread, int ticks)
1088 {
1089 	boolean_t	result;
1090 
1091 	/* -1 means don't block */
1092 	if (ticks != -1 && !thread->sth_kill) {
1093 		if (ticks == 0) {
1094 			cv_wait(&thread->sth_cv, &thread->sth_mtx);
1095 		} else {
1096 			(void) cv_reltimedwait(&thread->sth_cv,
1097 			    &thread->sth_mtx, (clock_t)ticks, TR_CLOCK_TICK);
1098 		}
1099 	}
1100 	result = (thread->sth_kill == 0);
1101 
1102 	return (result);
1103 }
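
/*
 * A sketch of the typical smb_thread_t lifecycle (my_worker, my_arg and
 * "t" are hypothetical):
 *
 *	static void
 *	my_worker(smb_thread_t *thread, void *my_arg)
 *	{
 *		while (smb_thread_continue(thread)) {
 *			...	(one unit of work per wakeup)
 *		}
 *	}
 *
 *	smb_thread_init(&t, "my_worker", my_worker, my_arg, NULL, NULL);
 *	(void) smb_thread_start(&t);
 *	smb_thread_signal(&t);			(wake the worker)
 *	smb_thread_stop(&t);			(request exit and wait)
 *	smb_thread_destroy(&t);
 */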
1104 
1105 void
1106 smb_thread_set_awaken(smb_thread_t *thread, smb_thread_aw_t new_aw_fn,
1107     void *new_aw_arg)
1108 {
1109 	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
1110 
1111 	mutex_enter(&thread->sth_mtx);
1112 	thread->sth_aw = new_aw_fn;
1113 	thread->sth_aw_arg = new_aw_arg;
1114 	mutex_exit(&thread->sth_mtx);
1115 }
1116 
1117 /*
1118  * smb_rwx_init
1119  */
1120 void
1121 smb_rwx_init(
1122     smb_rwx_t	*rwx)
1123 {
1124 	bzero(rwx, sizeof (smb_rwx_t));
1125 	cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
1126 	mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
1127 	rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
1128 }
1129 
1130 /*
1131  * smb_rwx_destroy
1132  */
1133 void
1134 smb_rwx_destroy(
1135     smb_rwx_t	*rwx)
1136 {
1137 	mutex_destroy(&rwx->rwx_mutex);
1138 	cv_destroy(&rwx->rwx_cv);
1139 	rw_destroy(&rwx->rwx_lock);
1140 }
1141 
1142 /*
1143  * smb_rwx_rwexit
1144  */
1145 void
1146 smb_rwx_rwexit(
1147     smb_rwx_t	*rwx)
1148 {
1149 	if (rw_write_held(&rwx->rwx_lock)) {
1150 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1151 		mutex_enter(&rwx->rwx_mutex);
1152 		if (rwx->rwx_waiting) {
1153 			rwx->rwx_waiting = B_FALSE;
1154 			cv_broadcast(&rwx->rwx_cv);
1155 		}
1156 		mutex_exit(&rwx->rwx_mutex);
1157 	}
1158 	rw_exit(&rwx->rwx_lock);
1159 }
1160 
1161 /*
1162  * smb_rwx_rwupgrade
1163  */
1164 krw_t
1165 smb_rwx_rwupgrade(
1166     smb_rwx_t	*rwx)
1167 {
1168 	if (rw_write_held(&rwx->rwx_lock)) {
1169 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1170 		return (RW_WRITER);
1171 	}
1172 	if (!rw_tryupgrade(&rwx->rwx_lock)) {
1173 		rw_exit(&rwx->rwx_lock);
1174 		rw_enter(&rwx->rwx_lock, RW_WRITER);
1175 	}
1176 	return (RW_READER);
1177 }
1178 
1179 /*
1180  * smb_rwx_rwdowngrade
1181  */
1182 void
1183 smb_rwx_rwdowngrade(
1184     smb_rwx_t	*rwx,
1185     krw_t	mode)
1186 {
1187 	ASSERT(rw_write_held(&rwx->rwx_lock));
1188 	ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1189 
1190 	if (mode == RW_WRITER) {
1191 		return;
1192 	}
1193 	ASSERT(mode == RW_READER);
1194 	mutex_enter(&rwx->rwx_mutex);
1195 	if (rwx->rwx_waiting) {
1196 		rwx->rwx_waiting = B_FALSE;
1197 		cv_broadcast(&rwx->rwx_cv);
1198 	}
1199 	mutex_exit(&rwx->rwx_mutex);
1200 	rw_downgrade(&rwx->rwx_lock);
1201 }
1202 
1203 /*
1204  * smb_rwx_wait
1205  *
1206  * This function assumes the smb_rwx lock was entered in RW_READER or RW_WRITER
1207  * mode. It will:
1208  *
1209  *	1) release the lock and save its current mode.
1210  *	2) wait until the condition variable is signaled. This can happen for
1211  *	   2 reasons: when a writer releases the lock or when the timeout (if
1212  *	   provided) expires.
1213  *	3) re-acquire the lock in the mode saved in (1).
1214  */
1215 int
1216 smb_rwx_rwwait(
1217     smb_rwx_t	*rwx,
1218     clock_t	timeout)
1219 {
1220 	int	rc;
1221 	krw_t	mode;
1222 
1223 	mutex_enter(&rwx->rwx_mutex);
1224 	rwx->rwx_waiting = B_TRUE;
1225 	mutex_exit(&rwx->rwx_mutex);
1226 
1227 	if (rw_write_held(&rwx->rwx_lock)) {
1228 		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
1229 		mode = RW_WRITER;
1230 	} else {
1231 		ASSERT(rw_read_held(&rwx->rwx_lock));
1232 		mode = RW_READER;
1233 	}
1234 	rw_exit(&rwx->rwx_lock);
1235 
1236 	mutex_enter(&rwx->rwx_mutex);
1237 	if (rwx->rwx_waiting) {
1238 		if (timeout == -1) {
1239 			rc = 1;
1240 			cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
1241 		} else {
1242 			rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
1243 			    timeout, TR_CLOCK_TICK);
1244 		}
1245 	}
1246 	mutex_exit(&rwx->rwx_mutex);
1247 
1248 	rw_enter(&rwx->rwx_lock, mode);
1249 	return (rc);
1250 }
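
/*
 * A sketch of how smb_rwx_rwwait() is used (illustrative only; "obj",
 * its "state" field and READY are hypothetical, and callers normally go
 * through wrappers that enter rwx_lock rather than calling rw_enter()
 * directly):
 *
 *	rw_enter(&obj->rwx.rwx_lock, RW_READER);
 *	while (obj->state != READY) {
 *		if (smb_rwx_rwwait(&obj->rwx, timeout) <= 0)
 *			break;		(timed out)
 *	}
 *	smb_rwx_rwexit(&obj->rwx);
 */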
1251 
1252 /*
1253  * SMB ID mapping
1254  *
1255  * The Solaris ID mapping service (aka Winchester) works with domain SIDs
1256  * and RIDs, where domain SIDs are in string format.  The CIFS service
1257  * works with binary SIDs understandable by CIFS clients.  A layer of SMB
1258  * ID mapping functions is implemented to hide the SID conversion details
1259  * and also hide the handling of arrays of batch mapping requests.
1260  *
1261  * IMPORTANT NOTE: The Winchester API requires a zone.  Because the CIFS
1262  * server currently runs only in the global zone, the global zone is
1263  * specified.  This needs to be fixed when the CIFS server supports zones.
1264  */
1265 
1266 static int smb_idmap_batch_binsid(smb_idmap_batch_t *sib);
1267 
1268 /*
1269  * smb_idmap_getid
1270  *
1271  * Maps the given Windows SID to a Solaris ID using the
1272  * simple mapping API.
1273  */
1274 idmap_stat
1275 smb_idmap_getid(smb_sid_t *sid, uid_t *id, int *idtype)
1276 {
1277 	smb_idmap_t sim;
1278 	char sidstr[SMB_SID_STRSZ];
1279 
1280 	smb_sid_tostr(sid, sidstr);
1281 	if (smb_sid_splitstr(sidstr, &sim.sim_rid) != 0)
1282 		return (IDMAP_ERR_SID);
1283 	sim.sim_domsid = sidstr;
1284 	sim.sim_id = id;
1285 
1286 	switch (*idtype) {
1287 	case SMB_IDMAP_USER:
1288 		sim.sim_stat = kidmap_getuidbysid(global_zone, sim.sim_domsid,
1289 		    sim.sim_rid, sim.sim_id);
1290 		break;
1291 
1292 	case SMB_IDMAP_GROUP:
1293 		sim.sim_stat = kidmap_getgidbysid(global_zone, sim.sim_domsid,
1294 		    sim.sim_rid, sim.sim_id);
1295 		break;
1296 
1297 	case SMB_IDMAP_UNKNOWN:
1298 		sim.sim_stat = kidmap_getpidbysid(global_zone, sim.sim_domsid,
1299 		    sim.sim_rid, sim.sim_id, &sim.sim_idtype);
1300 		break;
1301 
1302 	default:
1303 		ASSERT(0);
1304 		return (IDMAP_ERR_ARG);
1305 	}
1306 
1307 	*idtype = sim.sim_idtype;
1308 
1309 	return (sim.sim_stat);
1310 }
1311 
1312 /*
1313  * smb_idmap_getsid
1314  *
1315  * Maps the given Solaris ID to a Windows SID using the
1316  * simple mapping API.
1317  */
1318 idmap_stat
1319 smb_idmap_getsid(uid_t id, int idtype, smb_sid_t **sid)
1320 {
1321 	smb_idmap_t sim;
1322 
1323 	switch (idtype) {
1324 	case SMB_IDMAP_USER:
1325 		sim.sim_stat = kidmap_getsidbyuid(global_zone, id,
1326 		    (const char **)&sim.sim_domsid, &sim.sim_rid);
1327 		break;
1328 
1329 	case SMB_IDMAP_GROUP:
1330 		sim.sim_stat = kidmap_getsidbygid(global_zone, id,
1331 		    (const char **)&sim.sim_domsid, &sim.sim_rid);
1332 		break;
1333 
1334 	case SMB_IDMAP_EVERYONE:
1335 		/* Everyone S-1-1-0 */
1336 		sim.sim_domsid = "S-1-1";
1337 		sim.sim_rid = 0;
1338 		sim.sim_stat = IDMAP_SUCCESS;
1339 		break;
1340 
1341 	default:
1342 		ASSERT(0);
1343 		return (IDMAP_ERR_ARG);
1344 	}
1345 
1346 	if (sim.sim_stat != IDMAP_SUCCESS)
1347 		return (sim.sim_stat);
1348 
1349 	if (sim.sim_domsid == NULL)
1350 		return (IDMAP_ERR_NOMAPPING);
1351 
1352 	sim.sim_sid = smb_sid_fromstr(sim.sim_domsid);
1353 	if (sim.sim_sid == NULL)
1354 		return (IDMAP_ERR_INTERNAL);
1355 
1356 	*sid = smb_sid_splice(sim.sim_sid, sim.sim_rid);
1357 	smb_sid_free(sim.sim_sid);
1358 	if (*sid == NULL)
1359 		sim.sim_stat = IDMAP_ERR_INTERNAL;
1360 
1361 	return (sim.sim_stat);
1362 }
1363 
1364 /*
1365  * smb_idmap_batch_create
1366  *
1367  * Creates and initializes the context for batch ID mapping.
1368  */
1369 idmap_stat
1370 smb_idmap_batch_create(smb_idmap_batch_t *sib, uint16_t nmap, int flags)
1371 {
1372 	ASSERT(sib);
1373 
1374 	bzero(sib, sizeof (smb_idmap_batch_t));
1375 
1376 	sib->sib_idmaph = kidmap_get_create(global_zone);
1377 
1378 	sib->sib_flags = flags;
1379 	sib->sib_nmap = nmap;
1380 	sib->sib_size = nmap * sizeof (smb_idmap_t);
1381 	sib->sib_maps = kmem_zalloc(sib->sib_size, KM_SLEEP);
1382 
1383 	return (IDMAP_SUCCESS);
1384 }
1385 
1386 /*
1387  * smb_idmap_batch_destroy
1388  *
1389  * Frees the batch ID mapping context.
1390  * If the ID mapping is Solaris -> Windows it frees the memory
1391  * allocated for the binary SIDs.
1392  */
1393 void
1394 smb_idmap_batch_destroy(smb_idmap_batch_t *sib)
1395 {
1396 	char *domsid;
1397 	int i;
1398 
1399 	ASSERT(sib);
1400 	ASSERT(sib->sib_maps);
1401 
1402 	if (sib->sib_idmaph)
1403 		kidmap_get_destroy(sib->sib_idmaph);
1404 
1405 	if (sib->sib_flags & SMB_IDMAP_ID2SID) {
1406 		/*
1407 		 * SIDs are allocated only when mapping
1408 		 * UID/GID to SIDs
1409 		 */
1410 		for (i = 0; i < sib->sib_nmap; i++)
1411 			smb_sid_free(sib->sib_maps[i].sim_sid);
1412 	} else if (sib->sib_flags & SMB_IDMAP_SID2ID) {
1413 		/*
1414 		 * SID prefixes are allocated only when mapping
1415 		 * SIDs to UID/GID
1416 		 */
1417 		for (i = 0; i < sib->sib_nmap; i++) {
1418 			domsid = sib->sib_maps[i].sim_domsid;
1419 			if (domsid)
1420 				smb_mem_free(domsid);
1421 		}
1422 	}
1423 
1424 	if (sib->sib_size && sib->sib_maps)
1425 		kmem_free(sib->sib_maps, sib->sib_size);
1426 }
1427 
1428 /*
1429  * smb_idmap_batch_getid
1430  *
1431  * Queue a request to map the given SID to a UID or GID.
1432  *
1433  * sim->sim_id should point to a variable that is supposed to
1434  * hold the returned UID/GID. This needs to be set up by the
1435  * caller of this function.
1436  *
1437  * If the requested ID type is known, it is passed as 'idtype';
1438  * if it is unknown it will be returned in sim->sim_idtype.
1439  */
1440 idmap_stat
1441 smb_idmap_batch_getid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
1442     smb_sid_t *sid, int idtype)
1443 {
1444 	char strsid[SMB_SID_STRSZ];
1445 	idmap_stat idm_stat;
1446 
1447 	ASSERT(idmaph);
1448 	ASSERT(sim);
1449 	ASSERT(sid);
1450 
1451 	smb_sid_tostr(sid, strsid);
1452 	if (smb_sid_splitstr(strsid, &sim->sim_rid) != 0)
1453 		return (IDMAP_ERR_SID);
1454 	sim->sim_domsid = smb_mem_strdup(strsid);
1455 
1456 	switch (idtype) {
1457 	case SMB_IDMAP_USER:
1458 		idm_stat = kidmap_batch_getuidbysid(idmaph, sim->sim_domsid,
1459 		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
1460 		break;
1461 
1462 	case SMB_IDMAP_GROUP:
1463 		idm_stat = kidmap_batch_getgidbysid(idmaph, sim->sim_domsid,
1464 		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
1465 		break;
1466 
1467 	case SMB_IDMAP_UNKNOWN:
1468 		idm_stat = kidmap_batch_getpidbysid(idmaph, sim->sim_domsid,
1469 		    sim->sim_rid, sim->sim_id, &sim->sim_idtype,
1470 		    &sim->sim_stat);
1471 		break;
1472 
1473 	default:
1474 		ASSERT(0);
1475 		return (IDMAP_ERR_ARG);
1476 	}
1477 
1478 	return (idm_stat);
1479 }
1480 
1481 /*
1482  * smb_idmap_batch_getsid
1483  *
1484  * Queue a request to map the given UID/GID to a SID.
1485  *
1486  * sim->sim_domsid and sim->sim_rid will contain the mapping
1487  * result upon successful processing of the batched request.
1488  */
1489 idmap_stat
1490 smb_idmap_batch_getsid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
1491     uid_t id, int idtype)
1492 {
1493 	idmap_stat idm_stat;
1494 
1495 	switch (idtype) {
1496 	case SMB_IDMAP_USER:
1497 		idm_stat = kidmap_batch_getsidbyuid(idmaph, id,
1498 		    (const char **)&sim->sim_domsid, &sim->sim_rid,
1499 		    &sim->sim_stat);
1500 		break;
1501 
1502 	case SMB_IDMAP_GROUP:
1503 		idm_stat = kidmap_batch_getsidbygid(idmaph, id,
1504 		    (const char **)&sim->sim_domsid, &sim->sim_rid,
1505 		    &sim->sim_stat);
1506 		break;
1507 
1508 	case SMB_IDMAP_OWNERAT:
1509 		/* Current Owner S-1-5-32-766 */
1510 		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
1511 		sim->sim_rid = SECURITY_CURRENT_OWNER_RID;
1512 		sim->sim_stat = IDMAP_SUCCESS;
1513 		idm_stat = IDMAP_SUCCESS;
1514 		break;
1515 
1516 	case SMB_IDMAP_GROUPAT:
1517 		/* Current Group S-1-5-32-767 */
1518 		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
1519 		sim->sim_rid = SECURITY_CURRENT_GROUP_RID;
1520 		sim->sim_stat = IDMAP_SUCCESS;
1521 		idm_stat = IDMAP_SUCCESS;
1522 		break;
1523 
1524 	case SMB_IDMAP_EVERYONE:
1525 		/* Everyone S-1-1-0 */
1526 		sim->sim_domsid = NT_WORLD_AUTH_SIDSTR;
1527 		sim->sim_rid = 0;
1528 		sim->sim_stat = IDMAP_SUCCESS;
1529 		idm_stat = IDMAP_SUCCESS;
1530 		break;
1531 
1532 	default:
1533 		ASSERT(0);
1534 		return (IDMAP_ERR_ARG);
1535 	}
1536 
1537 	return (idm_stat);
1538 }
1539 
1540 /*
1541  * smb_idmap_batch_binsid
1542  *
1543  * Convert the (domsid, rid) pairs to binary SIDs.
1544  *
1545  * Returns 0 if successful and non-zero upon failure.
1546  */
1547 static int
1548 smb_idmap_batch_binsid(smb_idmap_batch_t *sib)
1549 {
1550 	smb_sid_t *sid;
1551 	smb_idmap_t *sim;
1552 	int i;
1553 
1554 	if (sib->sib_flags & SMB_IDMAP_SID2ID)
1555 		/* This operation is not required */
1556 		return (0);
1557 
1558 	sim = sib->sib_maps;
1559 	for (i = 0; i < sib->sib_nmap; sim++, i++) {
1560 		ASSERT(sim->sim_domsid);
1561 		if (sim->sim_domsid == NULL)
1562 			return (1);
1563 
1564 		if ((sid = smb_sid_fromstr(sim->sim_domsid)) == NULL)
1565 			return (1);
1566 
1567 		sim->sim_sid = smb_sid_splice(sid, sim->sim_rid);
1568 		smb_sid_free(sid);
1569 	}
1570 
1571 	return (0);
1572 }
1573 
1574 /*
1575  * smb_idmap_batch_getmappings
1576  *
1577  * Trigger the ID mapping service to get the mappings for the queued
1578  * requests.
1579  *
1580  * Checks the result of all the queued requests.
1581  * If this is a Solaris -> Windows mapping it generates
1582  * binary SIDs from returned (domsid, rid) pairs.
1583  */
1584 idmap_stat
1585 smb_idmap_batch_getmappings(smb_idmap_batch_t *sib)
1586 {
1587 	idmap_stat idm_stat = IDMAP_SUCCESS;
1588 	int i;
1589 
1590 	idm_stat = kidmap_get_mappings(sib->sib_idmaph);
1591 	if (idm_stat != IDMAP_SUCCESS)
1592 		return (idm_stat);
1593 
1594 	/*
1595 	 * Check the status for all the queued requests
1596 	 */
1597 	for (i = 0; i < sib->sib_nmap; i++) {
1598 		if (sib->sib_maps[i].sim_stat != IDMAP_SUCCESS)
1599 			return (sib->sib_maps[i].sim_stat);
1600 	}
1601 
1602 	if (smb_idmap_batch_binsid(sib) != 0)
1603 		idm_stat = IDMAP_ERR_OTHER;
1604 
1605 	return (idm_stat);
1606 }
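
/*
 * A sketch of the batch mapping pattern implemented above (illustrative
 * only; "uids", "n" and "i" are hypothetical):
 *
 *	smb_idmap_batch_t	sib;
 *	idmap_stat		stat;
 *
 *	stat = smb_idmap_batch_create(&sib, n, SMB_IDMAP_ID2SID);
 *	for (i = 0; i < n && stat == IDMAP_SUCCESS; i++)
 *		stat = smb_idmap_batch_getsid(sib.sib_idmaph,
 *		    &sib.sib_maps[i], uids[i], SMB_IDMAP_USER);
 *	if (stat == IDMAP_SUCCESS)
 *		stat = smb_idmap_batch_getmappings(&sib);
 *	...	(on success, sib.sib_maps[i].sim_sid holds the binary SIDs)
 *	smb_idmap_batch_destroy(&sib);
 */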
1607 
1608 uint64_t
1609 smb_time_unix_to_nt(timestruc_t *unix_time)
1610 {
1611 	uint64_t nt_time;
1612 
1613 	if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
1614 		return (0);
1615 
1616 	nt_time = unix_time->tv_sec;
1617 	nt_time *= 10000000;  /* seconds to 100ns */
1618 	nt_time += unix_time->tv_nsec / 100;
1619 	return (nt_time + NT_TIME_BIAS);
1620 }
1621 
1622 void
1623 smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
1624 {
1625 	uint32_t seconds;
1626 
1627 	ASSERT(unix_time);
1628 
1629 	if ((nt_time == 0) || (nt_time == -1)) {
1630 		unix_time->tv_sec = 0;
1631 		unix_time->tv_nsec = 0;
1632 		return;
1633 	}
1634 
1635 	nt_time -= NT_TIME_BIAS;
1636 	seconds = nt_time / 10000000;
1637 	unix_time->tv_sec = seconds;
1638 	unix_time->tv_nsec = (nt_time  % 10000000) * 100;
1639 }
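
/*
 * A worked example for the two conversions above, assuming NT_TIME_BIAS
 * is the usual 116444736000000000 (the number of 100ns units between
 * 1601-01-01 and the Unix epoch):
 *
 *	tv_sec = 1000000000, tv_nsec = 500
 *	    -> 1000000000 * 10000000 + 500 / 100 + NT_TIME_BIAS
 *	    == 126444736000000005
 *
 * smb_time_nt_to_unix() reverses the calculation; both functions map
 * 0 (and -1 on the NT side) to 0 rather than to a real time.
 */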
1640 
1641 /*
1642  * smb_time_gmt_to_local, smb_time_local_to_gmt
1643  *
1644  * Apply the gmt offset to convert between local time and gmt
1645  */
1646 int32_t
1647 smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt)
1648 {
1649 	if ((gmt == 0) || (gmt == -1))
1650 		return (0);
1651 
1652 	return (gmt - sr->sr_gmtoff);
1653 }
1654 
1655 int32_t
1656 smb_time_local_to_gmt(smb_request_t *sr, int32_t local)
1657 {
1658 	if ((local == 0) || (local == -1))
1659 		return (0);
1660 
1661 	return (local + sr->sr_gmtoff);
1662 }
1663 
1664 
1665 /*
1666  * smb_time_dos_to_unix
1667  *
1668  * Convert SMB_DATE & SMB_TIME values to a unix timestamp.
1669  *
1670  * A date/time field of 0 means that the value assigned by the
1671  * server file system need not be changed. The behaviour when the
1672  * date/time field is set to -1 is not documented but is
1673  * generally treated like 0.
1674  * If date or time is 0 or -1 the unix time is returned as 0
1675  * so that the caller can identify and handle this special case.
1676  */
1677 int32_t
1678 smb_time_dos_to_unix(int16_t date, int16_t time)
1679 {
1680 	struct tm	atm;
1681 
1682 	if (((date == 0) || (time == 0)) ||
1683 	    ((date == -1) || (time == -1))) {
1684 		return (0);
1685 	}
1686 
1687 	atm.tm_year = ((date >>  9) & 0x3F) + 80;
1688 	atm.tm_mon  = ((date >>  5) & 0x0F) - 1;
1689 	atm.tm_mday = ((date >>  0) & 0x1F);
1690 	atm.tm_hour = ((time >> 11) & 0x1F);
1691 	atm.tm_min  = ((time >>  5) & 0x3F);
1692 	atm.tm_sec  = ((time >>  0) & 0x1F) << 1;
1693 
1694 	return (smb_timegm(&atm));
1695 }
1696 
1697 void
1698 smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p)
1699 {
1700 	struct tm	atm;
1701 	int		i;
1702 	time_t		tmp_time;
1703 
1704 	if (ux_time == 0) {
1705 		*date_p = 0;
1706 		*time_p = 0;
1707 		return;
1708 	}
1709 
1710 	tmp_time = (time_t)ux_time;
1711 	(void) smb_gmtime_r(&tmp_time, &atm);
1712 
1713 	if (date_p) {
1714 		i = 0;
1715 		i += atm.tm_year - 80;
1716 		i <<= 4;
1717 		i += atm.tm_mon + 1;
1718 		i <<= 5;
1719 		i += atm.tm_mday;
1720 
1721 		*date_p = (short)i;
1722 	}
1723 	if (time_p) {
1724 		i = 0;
1725 		i += atm.tm_hour;
1726 		i <<= 6;
1727 		i += atm.tm_min;
1728 		i <<= 5;
1729 		i += atm.tm_sec >> 1;
1730 
1731 		*time_p = (short)i;
1732 	}
1733 }
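
/*
 * The packing above follows the usual DOS date/time bit layout:
 *
 *	date: day in bits 0-4, month (1-12) in bits 5-8, year-1980 from bit 9
 *	time: seconds/2 in bits 0-4, minutes in bits 5-10, hours from bit 11
 *
 * For example (arithmetic only), 2010-03-17 10:30:46 packs as:
 *
 *	date = (30 << 9) | (3 << 5) | 17 == 15473
 *	time = (10 << 11) | (30 << 5) | 23 == 21463
 */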
1734 
1735 
1736 /*
1737  * smb_gmtime_r
1738  *
1739  * Thread-safe version of smb_gmtime. Returns a null pointer if either
1740  * input parameter is a null pointer. Otherwise returns a pointer
1741  * to result.
1742  *
1743  * Day of the week calculation: the Epoch was a thursday.
1744  *
1745  * There are no timezone corrections so tm_isdst and tm_gmtoff are
1746  * always zero, and the zone is always WET.
1747  */
1748 struct tm *
1749 smb_gmtime_r(time_t *clock, struct tm *result)
1750 {
1751 	time_t tsec;
1752 	int year;
1753 	int month;
1754 	int sec_per_month;
1755 
1756 	if (clock == 0 || result == 0)
1757 		return (0);
1758 
1759 	bzero(result, sizeof (struct tm));
1760 	tsec = *clock;
1761 	tsec -= tzh_leapcnt;
1762 
1763 	result->tm_wday = tsec / SECSPERDAY;
1764 	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;
1765 
1766 	year = EPOCH_YEAR;
1767 	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
1768 	    (SECSPERDAY * DAYSPERNYEAR))) {
1769 		if (isleap(year))
1770 			tsec -= SECSPERDAY * DAYSPERLYEAR;
1771 		else
1772 			tsec -= SECSPERDAY * DAYSPERNYEAR;
1773 
1774 		++year;
1775 	}
1776 
1777 	result->tm_year = year - TM_YEAR_BASE;
1778 	result->tm_yday = tsec / SECSPERDAY;
1779 
1780 	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
1781 		sec_per_month = days_in_month[month] * SECSPERDAY;
1782 
1783 		if (month == TM_FEBRUARY && isleap(year))
1784 			sec_per_month += SECSPERDAY;
1785 
1786 		if (tsec < sec_per_month)
1787 			break;
1788 
1789 		tsec -= sec_per_month;
1790 	}
1791 
1792 	result->tm_mon = month;
1793 	result->tm_mday = (tsec / SECSPERDAY) + 1;
1794 	tsec %= SECSPERDAY;
1795 	result->tm_sec = tsec % 60;
1796 	tsec /= 60;
1797 	result->tm_min = tsec % 60;
1798 	tsec /= 60;
1799 	result->tm_hour = (int)tsec;
1800 
1801 	return (result);
1802 }
1803 
1804 
1805 /*
1806  * smb_timegm
1807  *
1808  * Converts the broken-down time in tm to a time value, i.e. the number
1809  * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
1810  * not a POSIX or ANSI function. Per the man page, the input values of
1811  * tm_wday and tm_yday are ignored and, as the input data is assumed to
1812  * represent GMT, we force tm_isdst and tm_gmtoff to 0.
1813  *
1814  * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
1815  * and tm_yday, and bring the other fields within normal range. I don't
1816  * think this is really how it should be done but it's convenient for
1817  * now.
1818  */
1819 time_t
1820 smb_timegm(struct tm *tm)
1821 {
1822 	time_t tsec;
1823 	int dd;
1824 	int mm;
1825 	int yy;
1826 	int year;
1827 
1828 	if (tm == 0)
1829 		return (-1);
1830 
1831 	year = tm->tm_year + TM_YEAR_BASE;
1832 	tsec = tzh_leapcnt;
1833 
1834 	for (yy = EPOCH_YEAR; yy < year; ++yy) {
1835 		if (isleap(yy))
1836 			tsec += SECSPERDAY * DAYSPERLYEAR;
1837 		else
1838 			tsec += SECSPERDAY * DAYSPERNYEAR;
1839 	}
1840 
1841 	for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
1842 		dd = days_in_month[mm] * SECSPERDAY;
1843 
1844 		if (mm == TM_FEBRUARY && isleap(year))
1845 			dd += SECSPERDAY;
1846 
1847 		tsec += dd;
1848 	}
1849 
1850 	tsec += (tm->tm_mday - 1) * SECSPERDAY;
1851 	tsec += tm->tm_sec;
1852 	tsec += tm->tm_min * SECSPERMIN;
1853 	tsec += tm->tm_hour * SECSPERHOUR;
1854 
1855 	tm->tm_isdst = 0;
1856 	(void) smb_gmtime_r(&tsec, tm);
1857 	return (tsec);
1858 }
1859 
1860 /*
1861  * smb_cred_set_sid
1862  *
1863  * Initialize the ksid based on the given smb_id_t.
1864  */
1865 static void
1866 smb_cred_set_sid(smb_id_t *id, ksid_t *ksid)
1867 {
1868 	char sidstr[SMB_SID_STRSZ];
1869 	int rc;
1870 
1871 	ASSERT(id);
1872 	ASSERT(id->i_sid);
1873 
1874 	ksid->ks_id = id->i_id;
1875 	smb_sid_tostr(id->i_sid, sidstr);
1876 	rc = smb_sid_splitstr(sidstr, &ksid->ks_rid);
1877 	ASSERT(rc == 0);
1878 
1879 	ksid->ks_attr = id->i_attrs;
1880 	ksid->ks_domain = ksid_lookupdomain(sidstr);
1881 }
1882 
1883 /*
1884  * smb_cred_set_sidlist
1885  *
1886  * Allocate and initialize the ksidlist based on the Windows group list of the
1887  * access token.
1888  */
1889 static ksidlist_t *
1890 smb_cred_set_sidlist(smb_ids_t *token_grps)
1891 {
1892 	int i;
1893 	ksidlist_t *lp;
1894 
1895 	lp = kmem_zalloc(KSIDLIST_MEM(token_grps->i_cnt), KM_SLEEP);
1896 	lp->ksl_ref = 1;
1897 	lp->ksl_nsid = token_grps->i_cnt;
1898 	lp->ksl_neid = 0;
1899 
1900 	for (i = 0; i < lp->ksl_nsid; i++) {
1901 		smb_cred_set_sid(&token_grps->i_ids[i], &lp->ksl_sids[i]);
1902 		if (lp->ksl_sids[i].ks_id > IDMAP_WK__MAX_GID)
1903 			lp->ksl_neid++;
1904 	}
1905 
1906 	return (lp);
1907 }
1908 
1909 /*
1910  * A Solaris credential (cred_t structure) will be allocated and
1911  * initialized based on the given Windows style user access token.
1912  *
1913  * cred's gid is set to the primary group of the mapped Solaris user.
1914  * When there is no such mapped user (i.e. the mapped UID is ephemeral)
1915  * or his/her primary group could not be obtained, cred's gid is set to
1916  * the mapped Solaris group of token's primary group.
1917  */
1918 cred_t *
1919 smb_cred_create(smb_token_t *token, uint32_t *privileges)
1920 {
1921 	ksid_t			ksid;
1922 	ksidlist_t		*ksidlist = NULL;
1923 	smb_posix_grps_t	*posix_grps;
1924 	cred_t			*cr;
1925 	gid_t			gid;
1926 
1927 	ASSERT(token);
1928 	ASSERT(token->tkn_posix_grps);
1929 	posix_grps = token->tkn_posix_grps;
1930 
1931 	ASSERT(privileges);
1932 
1933 	cr = crget();
1934 	ASSERT(cr != NULL);
1935 
1936 	if (!IDMAP_ID_IS_EPHEMERAL(token->tkn_user.i_id) &&
1937 	    (posix_grps->pg_ngrps != 0)) {
1938 		gid = posix_grps->pg_grps[0];
1939 	} else {
1940 		gid = token->tkn_primary_grp.i_id;
1941 	}
1942 
1943 	if (crsetugid(cr, token->tkn_user.i_id, gid) != 0) {
1944 		crfree(cr);
1945 		return (NULL);
1946 	}
1947 
1948 	if (crsetgroups(cr, posix_grps->pg_ngrps, posix_grps->pg_grps) != 0) {
1949 		crfree(cr);
1950 		return (NULL);
1951 	}
1952 
1953 	smb_cred_set_sid(&token->tkn_user, &ksid);
1954 	crsetsid(cr, &ksid, KSID_USER);
1955 	smb_cred_set_sid(&token->tkn_primary_grp, &ksid);
1956 	crsetsid(cr, &ksid, KSID_GROUP);
1957 	smb_cred_set_sid(&token->tkn_owner, &ksid);
1958 	crsetsid(cr, &ksid, KSID_OWNER);
1959 	ksidlist = smb_cred_set_sidlist(&token->tkn_win_grps);
1960 	crsetsidlist(cr, ksidlist);
1961 
1962 	*privileges = 0;
1963 
1964 	if (smb_token_query_privilege(token, SE_BACKUP_LUID))
1965 		*privileges |= SMB_USER_PRIV_BACKUP;
1966 
1967 	if (smb_token_query_privilege(token, SE_RESTORE_LUID))
1968 		*privileges |= SMB_USER_PRIV_RESTORE;
1969 
1970 	if (smb_token_query_privilege(token, SE_TAKE_OWNERSHIP_LUID)) {
1971 		*privileges |= SMB_USER_PRIV_TAKE_OWNERSHIP;
1972 		(void) crsetpriv(cr, PRIV_FILE_CHOWN, NULL);
1973 	}
1974 
1975 	if (smb_token_query_privilege(token, SE_SECURITY_LUID))
1976 		*privileges |= SMB_USER_PRIV_SECURITY;
1977 
1978 	return (cr);
1979 }
1980 
1981 /*
1982  * smb_cred_rele
1983  *
1984  * The reference count of the user's credential is decremented. When it
1985  * drops to zero the credential is freed.
1986  */
1987 void
1988 smb_cred_rele(cred_t *cr)
1989 {
1990 	ASSERT(cr);
1991 	crfree(cr);
1992 }
1993 
1994 /*
1995  * smb_cred_is_member
1996  *
1997  * Same as smb_token_is_member. The only difference is that
1998  * we compare the given SID against the user SID and the ksidlist
1999  * of the user's cred.
2000  */
2001 int
2002 smb_cred_is_member(cred_t *cr, smb_sid_t *sid)
2003 {
2004 	ksidlist_t *ksidlist;
2005 	ksid_t ksid1, *ksid2;
2006 	smb_id_t id;
2007 	int i, rc = 0;
2008 
2009 	ASSERT(cr);
2010 
2011 	bzero(&id, sizeof (smb_id_t));
2012 	id.i_sid = sid;
2013 	smb_cred_set_sid(&id, &ksid1);
2014 
2015 	ksidlist = crgetsidlist(cr);
2016 	ASSERT(ksidlist);
2017 	ASSERT(ksid1.ks_domain);
2018 	ASSERT(ksid1.ks_domain->kd_name);
2019 
2020 	i = 0;
2021 	ksid2 = crgetsid(cr, KSID_USER);
2022 	do {
2023 		ASSERT(ksid2->ks_domain);
2024 		ASSERT(ksid2->ks_domain->kd_name);
2025 
2026 		if (strcmp(ksid1.ks_domain->kd_name,
2027 		    ksid2->ks_domain->kd_name) == 0 &&
2028 		    ksid1.ks_rid == ksid2->ks_rid) {
2029 			rc = 1;
2030 			break;
2031 		}
2032 
2033 		ksid2 = &ksidlist->ksl_sids[i];
2034 	} while (i++ < ksidlist->ksl_nsid);
2035 
2036 	ksid_rele(&ksid1);
2037 	return (rc);
2038 }
2039 
2040 /*
2041  * smb_cred_create_privs
2042  *
2043  * Creates a duplicate credential with the system privileges required for
2044  * certain SMB privileges: Backup and Restore.
2045  *
2046  */
2047 cred_t *
2048 smb_cred_create_privs(cred_t *user_cr, uint32_t privileges)
2049 {
2050 	cred_t *cr = NULL;
2051 
2052 	ASSERT(user_cr != NULL);
2053 
2054 	if (privileges & (SMB_USER_PRIV_BACKUP | SMB_USER_PRIV_RESTORE))
2055 		cr = crdup(user_cr);
2056 
2057 	if (cr == NULL)
2058 		return (NULL);
2059 
2060 	if (privileges & SMB_USER_PRIV_BACKUP) {
2061 		(void) crsetpriv(cr, PRIV_FILE_DAC_READ,
2062 		    PRIV_FILE_DAC_SEARCH, PRIV_SYS_MOUNT, NULL);
2063 	}
2064 
2065 	if (privileges & SMB_USER_PRIV_RESTORE) {
2066 		(void) crsetpriv(cr, PRIV_FILE_DAC_WRITE,
2067 		    PRIV_FILE_CHOWN, PRIV_FILE_CHOWN_SELF,
2068 		    PRIV_FILE_DAC_SEARCH, PRIV_FILE_LINK_ANY,
2069 		    PRIV_FILE_OWNER, PRIV_FILE_SETID, PRIV_SYS_LINKDIR,
2070 		    PRIV_SYS_MOUNT, NULL);
2071 	}
2072 
2073 	return (cr);
2074 }
2075 
2076 /*
2077  * smb_pad_align
2078  *
2079  * Returns the number of bytes required to pad an offset to the
2080  * specified alignment.
2081  */
2082 uint32_t
2083 smb_pad_align(uint32_t offset, uint32_t align)
2084 {
2085 	uint32_t pad = offset % align;
2086 
2087 	if (pad != 0)
2088 		pad = align - pad;
2089 
2090 	return (pad);
2091 }
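
/*
 * For example, smb_pad_align(5, 4) returns 3 and smb_pad_align(8, 4)
 * returns 0, so offset + smb_pad_align(offset, align) is the next
 * multiple of align at or after offset.
 */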
2092 
2093 /*
2094  * smb_panic
2095  *
2096  * Logs the file name, function name and line number passed in and panics the
2097  * system.
2098  */
2099 void
2100 smb_panic(char *file, const char *func, int line)
2101 {
2102 	cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
2103 }
2104