1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 */
26
27 #include <sys/param.h>
28 #include <sys/types.h>
29 #include <sys/tzfile.h>
30 #include <sys/atomic.h>
31 #include <sys/time.h>
32 #include <sys/spl.h>
33 #include <sys/random.h>
34 #include <smbsrv/smb_kproto.h>
35 #include <smbsrv/smb_fsops.h>
36 #include <smbsrv/smbinfo.h>
37 #include <smbsrv/smb_xdr.h>
38 #include <smbsrv/smb_vops.h>
39 #include <smbsrv/smb_idmap.h>
40
41 #include <sys/sid.h>
42 #include <sys/priv_names.h>
43
/* Object cache backing the llist delayed-delete queue; see smb_llist_init(). */
static kmem_cache_t	*smb_dtor_cache = NULL;

static boolean_t smb_avl_hold(smb_avl_t *);
static void smb_avl_rele(smb_avl_t *);

/* Accumulated leap seconds; applied in smb_gmtime_r() and smb_timegm(). */
time_t tzh_leapcnt = 0;

struct tm
*smb_gmtime_r(time_t *clock, struct tm *result);

time_t
smb_timegm(struct tm *tm);

/*
 * Kernel-local broken-down time, used only by smb_gmtime_r() and
 * smb_timegm() below. Field conventions follow the userland struct tm.
 */
struct tm {
	int	tm_sec;		/* seconds after the minute [0, 59] */
	int	tm_min;		/* minutes after the hour [0, 59] */
	int	tm_hour;	/* hours since midnight [0, 23] */
	int	tm_mday;	/* day of the month [1, 31] */
	int	tm_mon;		/* months since January [0, 11] */
	int	tm_year;	/* years since TM_YEAR_BASE */
	int	tm_wday;	/* days since Sunday [0, 6] */
	int	tm_yday;	/* days since January 1 */
	int	tm_isdst;	/* DST flag; always 0 here (GMT only) */
};

/* Days per month in a non-leap year; February is adjusted separately. */
static const int days_in_month[] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
72
73 int
smb_ascii_or_unicode_strlen(struct smb_request * sr,char * str)74 smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
75 {
76 if (sr->session->dialect >= SMB_VERS_2_BASE ||
77 (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
78 return (smb_wcequiv_strlen(str));
79 return (strlen(str));
80 }
81
82 int
smb_ascii_or_unicode_strlen_null(struct smb_request * sr,char * str)83 smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
84 {
85 if (sr->session->dialect >= SMB_VERS_2_BASE ||
86 (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
87 return (smb_wcequiv_strlen(str) + 2);
88 return (strlen(str) + 1);
89 }
90
91 int
smb_ascii_or_unicode_null_len(struct smb_request * sr)92 smb_ascii_or_unicode_null_len(struct smb_request *sr)
93 {
94 if (sr->session->dialect >= SMB_VERS_2_BASE ||
95 (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
96 return (2);
97 return (1);
98 }
99
/*
 * Convert old-style (DOS, LanMan) wildcard strings to NT style.
 * This should ONLY happen to patterns that come from old clients,
 * meaning dialect LANMAN2_1 etc. (dialect < NT_LM_0_12).
 *
 *	? is converted to >
 *	* is converted to < if it is followed by .
 *	. is converted to " if it is followed by ? or * or end of pattern
 *
 * Note: modifies pattern in place.
 */
void
smb_convert_wildcards(char *pattern)
{
	char *cp;

	for (cp = pattern; *cp != '\0'; cp++) {
		char next = cp[1];

		if (*cp == '?') {
			*cp = '>';
		} else if (*cp == '*' && next == '.') {
			*cp = '<';
		} else if (*cp == '.' &&
		    (next == '?' || next == '*' || next == '\0')) {
			*cp = '\"';
		}
	}
}
133
134 /*
135 * smb_sattr_check
136 *
137 * Check file attributes against a search attribute (sattr) mask.
138 *
139 * Normal files, which includes READONLY and ARCHIVE, always pass
140 * this check. If the DIRECTORY, HIDDEN or SYSTEM special attributes
141 * are set then they must appear in the search mask. The special
142 * attributes are inclusive, i.e. all special attributes that appear
143 * in sattr must also appear in the file attributes for the check to
144 * pass.
145 *
146 * The following examples show how this works:
147 *
148 * fileA: READONLY
149 * fileB: 0 (no attributes = normal file)
150 * fileC: READONLY, ARCHIVE
151 * fileD: HIDDEN
152 * fileE: READONLY, HIDDEN, SYSTEM
153 * dirA: DIRECTORY
154 *
155 * search attribute: 0
156 * Returns: fileA, fileB and fileC.
157 * search attribute: HIDDEN
158 * Returns: fileA, fileB, fileC and fileD.
159 * search attribute: SYSTEM
160 * Returns: fileA, fileB and fileC.
161 * search attribute: DIRECTORY
162 * Returns: fileA, fileB, fileC and dirA.
163 * search attribute: HIDDEN and SYSTEM
164 * Returns: fileA, fileB, fileC, fileD and fileE.
165 *
166 * Returns true if the file and sattr match; otherwise, returns false.
167 */
168 boolean_t
smb_sattr_check(uint16_t dosattr,uint16_t sattr)169 smb_sattr_check(uint16_t dosattr, uint16_t sattr)
170 {
171 if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
172 !(sattr & FILE_ATTRIBUTE_DIRECTORY))
173 return (B_FALSE);
174
175 if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
176 !(sattr & FILE_ATTRIBUTE_HIDDEN))
177 return (B_FALSE);
178
179 if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
180 !(sattr & FILE_ATTRIBUTE_SYSTEM))
181 return (B_FALSE);
182
183 return (B_TRUE);
184 }
185
186 time_t
smb_get_boottime(void)187 smb_get_boottime(void)
188 {
189 extern time_t boot_time;
190 zone_t *z = curzone;
191
192 /* Unfortunately, the GZ doesn't set zone_boot_time. */
193 if (z->zone_id == GLOBAL_ZONEID)
194 return (boot_time);
195
196 return (z->zone_boot_time);
197 }
198
/*
 * smb_idpool_increment
 *
 * This function increments the ID pool by doubling the current size. This
 * function assumes the caller entered the mutex of the pool.
 *
 * Returns 0 on success, -1 if the pool is already at SMB_IDPOOL_MAX_SIZE
 * or the new bitmap cannot be allocated (KM_NOSLEEP).
 */
static int
smb_idpool_increment(
    smb_idpool_t	*pool)
{
	uint8_t		*new_pool;
	uint32_t	new_size;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	new_size = pool->id_size * 2;
	if (new_size <= SMB_IDPOOL_MAX_SIZE) {
		/* The pool is a bitmap: sizes are in bits, allocs in bytes. */
		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
		if (new_pool) {
			bzero(new_pool, new_size / 8);
			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
			kmem_free(pool->id_pool, pool->id_size / 8);
			pool->id_pool = new_pool;
			pool->id_free_counter += new_size - pool->id_size;
			pool->id_max_free_counter += new_size - pool->id_size;
			pool->id_size = new_size;
			pool->id_idx_msk = (new_size / 8) - 1;
			if (new_size >= SMB_IDPOOL_MAX_SIZE) {
				/* id -1 made unavailable */
				pool->id_pool[pool->id_idx_msk] = 0x80;
				pool->id_free_counter--;
				pool->id_max_free_counter--;
			}
			return (0);
		}
	}
	return (-1);
}
237
/*
 * smb_idpool_constructor
 *
 * This function initializes the pool structure provided.
 * ID 0 is reserved here and never handed out; ID -1 (0xFFFF) is
 * reserved by smb_idpool_increment() once the pool reaches its
 * maximum size.  Always returns 0.
 */
int
smb_idpool_constructor(
    smb_idpool_t	*pool)
{

	ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);

	pool->id_size = SMB_IDPOOL_MIN_SIZE;
	pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
	pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	/* Start the allocation rotor at bit 1 (ID 1), past reserved ID 0. */
	pool->id_bit = 0x02;
	pool->id_bit_idx = 1;
	pool->id_idx = 0;
	pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
	    KM_SLEEP);
	bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
	pool->id_pool[0] = 0x01;	/* id 0 made unavailable */
	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
	pool->id_magic = SMB_IDPOOL_MAGIC;
	return (0);
}
266
/*
 * smb_idpool_destructor
 *
 * This function tears down and frees the resources associated with the
 * pool provided.  All allocated IDs must have been returned (the free
 * counter is back at its maximum) before destruction.
 */
void
smb_idpool_destructor(
    smb_idpool_t	*pool)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
	/* Invalidate the magic before freeing to catch use-after-destroy. */
	pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
	mutex_destroy(&pool->id_mutex);
	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
}
283
/*
 * smb_idpool_alloc
 *
 * This function allocates an ID from the pool provided.
 * The bitmap is scanned starting from the rotor left by the previous
 * allocation (id_idx/id_bit); the pool is doubled first if no ID is
 * free.  Returns 0 and stores the new ID through *id, or -1 if the
 * pool is exhausted and cannot grow.
 */
int
smb_idpool_alloc(
    smb_idpool_t	*pool,
    uint16_t		*id)
{
	uint32_t	i;
	uint8_t		bit;
	uint8_t		bit_idx;
	uint8_t		byte;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	mutex_enter(&pool->id_mutex);
	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
		mutex_exit(&pool->id_mutex);
		return (-1);
	}

	i = pool->id_size;
	while (i) {
		/* Resume the scan where the previous allocation stopped. */
		bit = pool->id_bit;
		bit_idx = pool->id_bit_idx;
		byte = pool->id_pool[pool->id_idx];
		while (bit) {
			if (byte & bit) {
				bit = bit << 1;
				bit_idx++;
				continue;
			}
			/* Found a clear bit: claim it and record the rotor. */
			pool->id_pool[pool->id_idx] |= bit;
			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
			pool->id_free_counter--;
			pool->id_bit = bit;
			pool->id_bit_idx = bit_idx;
			mutex_exit(&pool->id_mutex);
			return (0);
		}
		/* Advance to the next byte, wrapping at the end. */
		pool->id_bit = 1;
		pool->id_bit_idx = 0;
		pool->id_idx++;
		pool->id_idx &= pool->id_idx_msk;
		--i;
	}
	/*
	 * This section of code shouldn't be reached. If there are IDs
	 * available and none could be found there's a problem.
	 */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
	return (-1);
}
340
/*
 * smb_idpool_free
 *
 * This function frees the ID provided.  The ID must have been
 * allocated from this pool; IDs 0 and 0xFFFF are reserved and
 * must never be freed.
 */
void
smb_idpool_free(
    smb_idpool_t	*pool,
    uint16_t	id)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(id != 0);
	ASSERT(id != 0xFFFF);

	mutex_enter(&pool->id_mutex);
	/* Bit must be set (ID allocated) before it can be cleared. */
	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
		pool->id_free_counter++;
		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
		mutex_exit(&pool->id_mutex);
		return;
	}
	/* Freeing a free ID. */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
}
367
368 /*
369 * Initialize the llist delete queue object cache.
370 */
371 void
smb_llist_init(void)372 smb_llist_init(void)
373 {
374 if (smb_dtor_cache != NULL)
375 return;
376
377 smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
378 sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
379 }
380
381 /*
382 * Destroy the llist delete queue object cache.
383 */
384 void
smb_llist_fini(void)385 smb_llist_fini(void)
386 {
387 if (smb_dtor_cache != NULL) {
388 kmem_cache_destroy(smb_dtor_cache);
389 smb_dtor_cache = NULL;
390 }
391 }
392
393 /*
394 * smb_llist_constructor
395 *
396 * This function initializes a locked list.
397 */
398 void
smb_llist_constructor(smb_llist_t * ll,size_t size,size_t offset)399 smb_llist_constructor(
400 smb_llist_t *ll,
401 size_t size,
402 size_t offset)
403 {
404 rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
405 mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
406 list_create(&ll->ll_list, size, offset);
407 list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
408 offsetof(smb_dtor_t, dt_lnd));
409 ll->ll_count = 0;
410 ll->ll_wrop = 0;
411 ll->ll_deleteq_count = 0;
412 ll->ll_flushing = B_FALSE;
413 }
414
/*
 * Flush the delete queue and destroy a locked list.
 * Both the list and its delete queue must be empty after the flush.
 */
void
smb_llist_destructor(
    smb_llist_t	*ll)
{
	/* Run any pending destructors before tearing anything down. */
	smb_llist_flush(ll);

	ASSERT(ll->ll_count == 0);
	ASSERT(ll->ll_deleteq_count == 0);

	rw_destroy(&ll->ll_lock);
	list_destroy(&ll->ll_list);
	list_destroy(&ll->ll_deleteq);
	mutex_destroy(&ll->ll_mutex);
}
432
433 /*
434 * Post an object to the delete queue. The delete queue will be processed
435 * during list exit or list destruction. Objects are often posted for
436 * deletion during list iteration (while the list is locked) but that is
437 * not required, and an object can be posted at any time.
438 */
439 void
smb_llist_post(smb_llist_t * ll,void * object,smb_dtorproc_t dtorproc)440 smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
441 {
442 smb_dtor_t *dtor;
443
444 ASSERT((object != NULL) && (dtorproc != NULL));
445
446 dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
447 bzero(dtor, sizeof (smb_dtor_t));
448 dtor->dt_magic = SMB_DTOR_MAGIC;
449 dtor->dt_object = object;
450 dtor->dt_proc = dtorproc;
451
452 mutex_enter(&ll->ll_mutex);
453 list_insert_tail(&ll->ll_deleteq, dtor);
454 ++ll->ll_deleteq_count;
455 mutex_exit(&ll->ll_mutex);
456 }
457
/*
 * Exit the list lock and process the delete queue.
 * The lock must be dropped first: the destructors run by the flush
 * must not execute while ll_lock is held.
 */
void
smb_llist_exit(smb_llist_t *ll)
{
	rw_exit(&ll->ll_lock);
	smb_llist_flush(ll);
}
467
/*
 * Flush the list delete queue. The mutex is dropped across the destructor
 * call in case this leads to additional objects being posted to the delete
 * queue.
 */
void
smb_llist_flush(smb_llist_t *ll)
{
	smb_dtor_t    *dtor;

	mutex_enter(&ll->ll_mutex);
	/* Only one thread drains the queue at a time. */
	if (ll->ll_flushing) {
		mutex_exit(&ll->ll_mutex);
		return;
	}
	ll->ll_flushing = B_TRUE;

	dtor = list_head(&ll->ll_deleteq);
	while (dtor != NULL) {
		SMB_DTOR_VALID(dtor);
		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
		list_remove(&ll->ll_deleteq, dtor);
		--ll->ll_deleteq_count;
		/* Run the destructor without holding ll_mutex. */
		mutex_exit(&ll->ll_mutex);

		dtor->dt_proc(dtor->dt_object);

		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
		kmem_cache_free(smb_dtor_cache, dtor);
		mutex_enter(&ll->ll_mutex);
		/* Re-read the head: the queue may have changed meanwhile. */
		dtor = list_head(&ll->ll_deleteq);
	}
	ll->ll_flushing = B_FALSE;

	mutex_exit(&ll->ll_mutex);
}
504
/*
 * smb_llist_upgrade
 *
 * This function tries to upgrade the lock of the locked list. It assumes the
 * locked has already been entered in RW_READER mode. It first tries using the
 * Solaris function rw_tryupgrade(). If that call fails the lock is released
 * and reentered in RW_WRITER mode. In that last case a window is opened during
 * which the contents of the list may have changed. The return code indicates
 * whether or not the list was modified when the lock was exited.
 */
int
smb_llist_upgrade(
    smb_llist_t *ll)
{
	uint64_t	wrop;

	if (rw_tryupgrade(&ll->ll_lock) != 0) {
		return (0);
	}
	/*
	 * Atomic upgrade failed: remember the write-op generation,
	 * drop and re-enter as writer, then report whether any write
	 * slipped in during the window.
	 */
	wrop = ll->ll_wrop;
	rw_exit(&ll->ll_lock);
	rw_enter(&ll->ll_lock, RW_WRITER);
	return (wrop != ll->ll_wrop);
}
528
529 /*
530 * smb_llist_insert_head
531 *
532 * This function inserts the object passed a the beginning of the list. This
533 * function assumes the lock of the list has already been entered.
534 */
535 void
smb_llist_insert_head(smb_llist_t * ll,void * obj)536 smb_llist_insert_head(
537 smb_llist_t *ll,
538 void *obj)
539 {
540 list_insert_head(&ll->ll_list, obj);
541 ++ll->ll_wrop;
542 ++ll->ll_count;
543 }
544
545 /*
546 * smb_llist_insert_tail
547 *
548 * This function appends to the object passed to the list. This function assumes
549 * the lock of the list has already been entered.
550 *
551 */
552 void
smb_llist_insert_tail(smb_llist_t * ll,void * obj)553 smb_llist_insert_tail(
554 smb_llist_t *ll,
555 void *obj)
556 {
557 list_insert_tail(&ll->ll_list, obj);
558 ++ll->ll_wrop;
559 ++ll->ll_count;
560 }
561
562 /*
563 * smb_llist_remove
564 *
565 * This function removes the object passed from the list. This function assumes
566 * the lock of the list has already been entered.
567 */
568 void
smb_llist_remove(smb_llist_t * ll,void * obj)569 smb_llist_remove(
570 smb_llist_t *ll,
571 void *obj)
572 {
573 list_remove(&ll->ll_list, obj);
574 ++ll->ll_wrop;
575 --ll->ll_count;
576 }
577
578 /*
579 * smb_llist_get_count
580 *
581 * This function returns the number of elements in the specified list.
582 */
583 uint32_t
smb_llist_get_count(smb_llist_t * ll)584 smb_llist_get_count(
585 smb_llist_t *ll)
586 {
587 return (ll->ll_count);
588 }
589
590 /*
591 * smb_slist_constructor
592 *
593 * Synchronized list constructor.
594 */
595 void
smb_slist_constructor(smb_slist_t * sl,size_t size,size_t offset)596 smb_slist_constructor(
597 smb_slist_t *sl,
598 size_t size,
599 size_t offset)
600 {
601 mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
602 cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
603 list_create(&sl->sl_list, size, offset);
604 sl->sl_count = 0;
605 sl->sl_waiting = B_FALSE;
606 }
607
/*
 * smb_slist_destructor
 *
 * Synchronized list destructor.  The list must be empty.
 */
void
smb_slist_destructor(
    smb_slist_t	*sl)
{
	VERIFY(sl->sl_count == 0);

	mutex_destroy(&sl->sl_mutex);
	cv_destroy(&sl->sl_cv);
	list_destroy(&sl->sl_list);
}
623
624 /*
625 * smb_slist_insert_head
626 *
627 * This function inserts the object passed a the beginning of the list.
628 */
629 void
smb_slist_insert_head(smb_slist_t * sl,void * obj)630 smb_slist_insert_head(
631 smb_slist_t *sl,
632 void *obj)
633 {
634 mutex_enter(&sl->sl_mutex);
635 list_insert_head(&sl->sl_list, obj);
636 ++sl->sl_count;
637 mutex_exit(&sl->sl_mutex);
638 }
639
640 /*
641 * smb_slist_insert_tail
642 *
643 * This function appends the object passed to the list.
644 */
645 void
smb_slist_insert_tail(smb_slist_t * sl,void * obj)646 smb_slist_insert_tail(
647 smb_slist_t *sl,
648 void *obj)
649 {
650 mutex_enter(&sl->sl_mutex);
651 list_insert_tail(&sl->sl_list, obj);
652 ++sl->sl_count;
653 mutex_exit(&sl->sl_mutex);
654 }
655
656 /*
657 * smb_llist_remove
658 *
659 * This function removes the object passed by the caller from the list.
660 */
661 void
smb_slist_remove(smb_slist_t * sl,void * obj)662 smb_slist_remove(
663 smb_slist_t *sl,
664 void *obj)
665 {
666 mutex_enter(&sl->sl_mutex);
667 list_remove(&sl->sl_list, obj);
668 if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
669 sl->sl_waiting = B_FALSE;
670 cv_broadcast(&sl->sl_cv);
671 }
672 mutex_exit(&sl->sl_mutex);
673 }
674
/*
 * smb_slist_move_tail
 *
 * This function transfers all the contents of the synchronized list to the
 * list_t provided. It returns the number of objects transferred.
 */
uint32_t
smb_slist_move_tail(
    list_t	*lst,
    smb_slist_t	*sl)
{
	uint32_t	rv;

	mutex_enter(&sl->sl_mutex);
	rv = sl->sl_count;
	if (sl->sl_count) {
		list_move_tail(lst, &sl->sl_list);
		sl->sl_count = 0;
		/* The list is now empty: wake any drain waiter. */
		if (sl->sl_waiting) {
			sl->sl_waiting = B_FALSE;
			cv_broadcast(&sl->sl_cv);
		}
	}
	mutex_exit(&sl->sl_mutex);
	return (rv);
}
701
/*
 * smb_slist_obj_move
 *
 * This function moves an object from one list to the end of the other list. It
 * assumes the mutex of each list has been entered.
 */
void
smb_slist_obj_move(
    smb_slist_t	*dst,
    smb_slist_t	*src,
    void	*obj)
{
	/* Both lists must link objects through the same node layout. */
	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);

	list_remove(&src->sl_list, obj);
	list_insert_tail(&dst->sl_list, obj);
	dst->sl_count++;
	src->sl_count--;
	/* Wake anyone waiting for the source list to drain. */
	if ((src->sl_count == 0) && (src->sl_waiting)) {
		src->sl_waiting = B_FALSE;
		cv_broadcast(&src->sl_cv);
	}
}
726
/*
 * smb_slist_wait_for_empty
 *
 * This function waits for a list to be emptied.  sl_waiting tells
 * the removal paths to broadcast sl_cv when the count reaches zero.
 */
void
smb_slist_wait_for_empty(
    smb_slist_t	*sl)
{
	mutex_enter(&sl->sl_mutex);
	while (sl->sl_count) {
		sl->sl_waiting = B_TRUE;
		cv_wait(&sl->sl_cv, &sl->sl_mutex);
	}
	mutex_exit(&sl->sl_mutex);
}
743
/*
 * smb_slist_exit
 *
 * This function exits the mutex of the list and signals the condition
 * variable if the list is empty.
 */
void
smb_slist_exit(smb_slist_t *sl)
{
	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}
759
760 /* smb_thread_... moved to smb_thread.c */
761
762 /*
763 * smb_rwx_init
764 */
765 void
smb_rwx_init(smb_rwx_t * rwx)766 smb_rwx_init(
767 smb_rwx_t *rwx)
768 {
769 bzero(rwx, sizeof (smb_rwx_t));
770 cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
771 mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
772 rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
773 }
774
775 /*
776 * smb_rwx_destroy
777 */
778 void
smb_rwx_destroy(smb_rwx_t * rwx)779 smb_rwx_destroy(
780 smb_rwx_t *rwx)
781 {
782 mutex_destroy(&rwx->rwx_mutex);
783 cv_destroy(&rwx->rwx_cv);
784 rw_destroy(&rwx->rwx_lock);
785 }
786
/*
 * smb_rwx_rwexit
 *
 * Exit the rwx lock.  When held as a writer, wake any thread blocked
 * in smb_rwx_rwwait() before the lock is released.
 */
void
smb_rwx_rwexit(
    smb_rwx_t	*rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mutex_enter(&rwx->rwx_mutex);
		if (rwx->rwx_waiting) {
			rwx->rwx_waiting = B_FALSE;
			cv_broadcast(&rwx->rwx_cv);
		}
		mutex_exit(&rwx->rwx_mutex);
	}
	rw_exit(&rwx->rwx_lock);
}
805
/*
 * smb_rwx_rwupgrade
 *
 * Upgrade the rwx lock to RW_WRITER and return the mode in which it
 * was held on entry.  If a reader-to-writer upgrade cannot be done
 * atomically the lock is dropped and re-entered as a writer, opening
 * a window during which protected state may change.
 */
krw_t
smb_rwx_rwupgrade(
    smb_rwx_t	*rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		return (RW_WRITER);
	}
	if (!rw_tryupgrade(&rwx->rwx_lock)) {
		rw_exit(&rwx->rwx_lock);
		rw_enter(&rwx->rwx_lock, RW_WRITER);
	}
	return (RW_READER);
}
823
/*
 * smb_rwx_rwdowngrade
 *
 * Restore the lock to the mode returned by smb_rwx_rwupgrade().
 * When downgrading back to RW_READER, wake any thread blocked in
 * smb_rwx_rwwait() first, since the write hold is being given up.
 */
void
smb_rwx_rwdowngrade(
    smb_rwx_t	*rwx,
    krw_t	mode)
{
	ASSERT(rw_write_held(&rwx->rwx_lock));
	ASSERT(rw_owner(&rwx->rwx_lock) == curthread);

	if (mode == RW_WRITER) {
		/* Was already a writer before the upgrade: nothing to do. */
		return;
	}
	ASSERT(mode == RW_READER);
	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		rwx->rwx_waiting = B_FALSE;
		cv_broadcast(&rwx->rwx_cv);
	}
	mutex_exit(&rwx->rwx_mutex);
	rw_downgrade(&rwx->rwx_lock);
}
847
/*
 * smb_rwx_wait
 *
 * This function assumes the smb_rwx lock was enter in RW_READER or RW_WRITER
 * mode. It will:
 *
 *	1) release the lock and save its current mode.
 *	2) wait until the condition variable is signaled. This can happen for
 *	   2 reasons: When a writer releases the lock or when the time out (if
 *	   provided) expires.
 *	3) re-acquire the lock in the mode saved in (1).
 *
 * timeout is in clock ticks; -1 means wait forever.  The return value
 * follows cv_reltimedwait() semantics: positive on wakeup/signal, -1
 * when the timeout expired.
 */
int
smb_rwx_rwwait(
    smb_rwx_t	*rwx,
    clock_t	timeout)
{
	krw_t	mode;
	int	rc = 1;

	/*
	 * Announce the wait before dropping the lock so a writer
	 * exiting in the window still broadcasts.
	 */
	mutex_enter(&rwx->rwx_mutex);
	rwx->rwx_waiting = B_TRUE;
	mutex_exit(&rwx->rwx_mutex);

	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mode = RW_WRITER;
	} else {
		ASSERT(rw_read_held(&rwx->rwx_lock));
		mode = RW_READER;
	}
	rw_exit(&rwx->rwx_lock);

	mutex_enter(&rwx->rwx_mutex);
	/* Skip the wait if a broadcast already consumed the flag. */
	if (rwx->rwx_waiting) {
		if (timeout == -1) {
			cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
		} else {
			rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
			    timeout, TR_CLOCK_TICK);
		}
	}
	mutex_exit(&rwx->rwx_mutex);

	rw_enter(&rwx->rwx_lock, mode);
	return (rc);
}
895
896 /* smb_idmap_... moved to smb_idmap.c */
897
898 uint64_t
smb_time_unix_to_nt(timestruc_t * unix_time)899 smb_time_unix_to_nt(timestruc_t *unix_time)
900 {
901 uint64_t nt_time;
902
903 if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
904 return (0);
905
906 nt_time = unix_time->tv_sec;
907 nt_time *= 10000000; /* seconds to 100ns */
908 nt_time += unix_time->tv_nsec / 100;
909 return (nt_time + NT_TIME_BIAS);
910 }
911
/*
 * Convert an NT time (100ns units since 1601) to a Unix timestruc_t.
 * 0 and all-ones are treated as "no time" sentinels and map to 0:0.
 */
void
smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
{
	/*
	 * NOTE(review): seconds is 32-bit, so NT times beyond what a
	 * uint32_t of Unix seconds can hold (~year 2106) truncate —
	 * confirm this is acceptable.
	 */
	uint32_t	seconds;

	ASSERT(unix_time);

	if ((nt_time == 0) || (nt_time == -1)) {
		unix_time->tv_sec = 0;
		unix_time->tv_nsec = 0;
		return;
	}

	/*
	 * Can't represent times less than or equal NT_TIME_BIAS,
	 * so convert them to the oldest date we can store.
	 * Note that time zero is "special" being converted
	 * both directions as 0:0 (unix-to-nt, nt-to-unix).
	 */
	if (nt_time <= NT_TIME_BIAS) {
		unix_time->tv_sec = 0;
		unix_time->tv_nsec = 100;
		return;
	}

	nt_time -= NT_TIME_BIAS;
	seconds = nt_time / 10000000;
	unix_time->tv_sec = seconds;
	unix_time->tv_nsec = (nt_time  % 10000000) * 100;
}
942
943 /*
944 * smb_time_gmt_to_local, smb_time_local_to_gmt
945 *
946 * Apply the gmt offset to convert between local time and gmt
947 */
948 int32_t
smb_time_gmt_to_local(smb_request_t * sr,int32_t gmt)949 smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt)
950 {
951 if ((gmt == 0) || (gmt == -1))
952 return (0);
953
954 return (gmt - sr->sr_gmtoff);
955 }
956
957 int32_t
smb_time_local_to_gmt(smb_request_t * sr,int32_t local)958 smb_time_local_to_gmt(smb_request_t *sr, int32_t local)
959 {
960 if ((local == 0) || (local == -1))
961 return (0);
962
963 return (local + sr->sr_gmtoff);
964 }
965
966
967 /*
968 * smb_time_dos_to_unix
969 *
970 * Convert SMB_DATE & SMB_TIME values to a unix timestamp.
971 *
972 * A date/time field of 0 means that that server file system
973 * assigned value need not be changed. The behaviour when the
974 * date/time field is set to -1 is not documented but is
975 * generally treated like 0.
976 * If date or time is 0 or -1 the unix time is returned as 0
977 * so that the caller can identify and handle this special case.
978 */
979 int32_t
smb_time_dos_to_unix(int16_t date,int16_t time)980 smb_time_dos_to_unix(int16_t date, int16_t time)
981 {
982 struct tm atm;
983
984 if (((date == 0) || (time == 0)) ||
985 ((date == -1) || (time == -1))) {
986 return (0);
987 }
988
989 atm.tm_year = ((date >> 9) & 0x3F) + 80;
990 atm.tm_mon = ((date >> 5) & 0x0F) - 1;
991 atm.tm_mday = ((date >> 0) & 0x1F);
992 atm.tm_hour = ((time >> 11) & 0x1F);
993 atm.tm_min = ((time >> 5) & 0x3F);
994 atm.tm_sec = ((time >> 0) & 0x1F) << 1;
995
996 return (smb_timegm(&atm));
997 }
998
999 void
smb_time_unix_to_dos(int32_t ux_time,int16_t * date_p,int16_t * time_p)1000 smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p)
1001 {
1002 struct tm atm;
1003 int i;
1004 time_t tmp_time;
1005
1006 if (ux_time == 0) {
1007 *date_p = 0;
1008 *time_p = 0;
1009 return;
1010 }
1011
1012 tmp_time = (time_t)ux_time;
1013 (void) smb_gmtime_r(&tmp_time, &atm);
1014
1015 if (date_p) {
1016 i = 0;
1017 i += atm.tm_year - 80;
1018 i <<= 4;
1019 i += atm.tm_mon + 1;
1020 i <<= 5;
1021 i += atm.tm_mday;
1022
1023 *date_p = (short)i;
1024 }
1025 if (time_p) {
1026 i = 0;
1027 i += atm.tm_hour;
1028 i <<= 6;
1029 i += atm.tm_min;
1030 i <<= 5;
1031 i += atm.tm_sec >> 1;
1032
1033 *time_p = (short)i;
1034 }
1035 }
1036
1037
/*
 * smb_gmtime_r
 *
 * Thread-safe version of smb_gmtime. Returns a null pointer if either
 * input parameter is a null pointer. Otherwise returns a pointer
 * to result.
 *
 * Day of the week calculation: the Epoch was a thursday.
 *
 * There are no timezone corrections so tm_isdst and tm_gmtoff are
 * always zero, and the zone is always WET.
 */
struct tm *
smb_gmtime_r(time_t *clock, struct tm *result)
{
	time_t tsec;
	int year;
	int month;
	int sec_per_month;

	if (clock == 0 || result == 0)
		return (0);

	bzero(result, sizeof (struct tm));
	tsec = *clock;
	tsec -= tzh_leapcnt;

	/* The Epoch (day 0) was a Thursday. */
	result->tm_wday = tsec / SECSPERDAY;
	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;

	/* Peel off whole years, accounting for leap days. */
	year = EPOCH_YEAR;
	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
	    (SECSPERDAY * DAYSPERNYEAR))) {
		if (isleap(year))
			tsec -= SECSPERDAY * DAYSPERLYEAR;
		else
			tsec -= SECSPERDAY * DAYSPERNYEAR;

		++year;
	}

	result->tm_year = year - TM_YEAR_BASE;
	result->tm_yday = tsec / SECSPERDAY;

	/* Peel off whole months within the year. */
	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
		sec_per_month = days_in_month[month] * SECSPERDAY;

		if (month == TM_FEBRUARY && isleap(year))
			sec_per_month += SECSPERDAY;

		if (tsec < sec_per_month)
			break;

		tsec -= sec_per_month;
	}

	result->tm_mon = month;
	result->tm_mday = (tsec / SECSPERDAY) + 1;
	/* Reduce the remaining seconds within the day to h:m:s. */
	tsec %= SECSPERDAY;
	result->tm_sec = tsec % 60;
	tsec /= 60;
	result->tm_min = tsec % 60;
	tsec /= 60;
	result->tm_hour = (int)tsec;

	return (result);
}
1105
1106
/*
 * smb_timegm
 *
 * Converts the broken-down time in tm to a time value, i.e. the number
 * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
 * not a POSIX or ANSI function. Per the man page, the input values of
 * tm_wday and tm_yday are ignored and, as the input data is assumed to
 * represent GMT, we force tm_isdst and tm_gmtoff to 0.
 *
 * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
 * and tm_yday, and bring the other fields within normal range. I don't
 * think this is really how it should be done but it's convenient for
 * now.
 */
time_t
smb_timegm(struct tm *tm)
{
	time_t tsec;
	int dd;
	int mm;
	int yy;
	int year;

	if (tm == 0)
		return (-1);

	year = tm->tm_year + TM_YEAR_BASE;
	tsec = tzh_leapcnt;

	/* Accumulate seconds for every whole year since the Epoch. */
	for (yy = EPOCH_YEAR; yy < year; ++yy) {
		if (isleap(yy))
			tsec += SECSPERDAY * DAYSPERLYEAR;
		else
			tsec += SECSPERDAY * DAYSPERNYEAR;
	}

	/* Accumulate seconds for whole months within the year. */
	for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
		dd = days_in_month[mm] * SECSPERDAY;

		if (mm == TM_FEBRUARY && isleap(year))
			dd += SECSPERDAY;

		tsec += dd;
	}

	tsec += (tm->tm_mday - 1) * SECSPERDAY;
	tsec += tm->tm_sec;
	tsec += tm->tm_min * SECSPERMIN;
	tsec += tm->tm_hour * SECSPERHOUR;

	tm->tm_isdst = 0;
	/* Normalize the caller's tm (tm_wday/tm_yday etc.). */
	(void) smb_gmtime_r(&tsec, tm);
	return (tsec);
}
1161
1162 /*
1163 * smb_pad_align
1164 *
1165 * Returns the number of bytes required to pad an offset to the
1166 * specified alignment.
1167 */
1168 uint32_t
smb_pad_align(uint32_t offset,uint32_t align)1169 smb_pad_align(uint32_t offset, uint32_t align)
1170 {
1171 uint32_t pad = offset % align;
1172
1173 if (pad != 0)
1174 pad = align - pad;
1175
1176 return (pad);
1177 }
1178
/*
 * smb_panic
 *
 * Logs the file name, function name and line number passed in and panics the
 * system.  Does not return.
 */
void
smb_panic(char *file, const char *func, int line)
{
	cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
}
1190
1191 /*
1192 * Creates an AVL tree and initializes the given smb_avl_t
1193 * structure using the passed args
1194 */
1195 void
smb_avl_create(smb_avl_t * avl,size_t size,size_t offset,const smb_avl_nops_t * ops)1196 smb_avl_create(smb_avl_t *avl, size_t size, size_t offset,
1197 const smb_avl_nops_t *ops)
1198 {
1199 ASSERT(avl);
1200 ASSERT(ops);
1201
1202 rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL);
1203 mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL);
1204
1205 avl->avl_nops = ops;
1206 avl->avl_state = SMB_AVL_STATE_READY;
1207 avl->avl_refcnt = 0;
1208 (void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence,
1209 sizeof (uint32_t));
1210
1211 avl_create(&avl->avl_tree, ops->avln_cmp, size, offset);
1212 }
1213
1214 /*
1215 * Destroys the specified AVL tree.
1216 * It waits for all the in-flight operations to finish
1217 * before destroying the AVL.
1218 */
1219 void
smb_avl_destroy(smb_avl_t * avl)1220 smb_avl_destroy(smb_avl_t *avl)
1221 {
1222 void *cookie = NULL;
1223 void *node;
1224
1225 ASSERT(avl);
1226
1227 mutex_enter(&avl->avl_mutex);
1228 if (avl->avl_state != SMB_AVL_STATE_READY) {
1229 mutex_exit(&avl->avl_mutex);
1230 return;
1231 }
1232
1233 avl->avl_state = SMB_AVL_STATE_DESTROYING;
1234
1235 while (avl->avl_refcnt > 0)
1236 (void) cv_wait(&avl->avl_cv, &avl->avl_mutex);
1237 mutex_exit(&avl->avl_mutex);
1238
1239 rw_enter(&avl->avl_lock, RW_WRITER);
1240 while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL)
1241 avl->avl_nops->avln_destroy(node);
1242
1243 avl_destroy(&avl->avl_tree);
1244 rw_exit(&avl->avl_lock);
1245
1246 rw_destroy(&avl->avl_lock);
1247
1248 mutex_destroy(&avl->avl_mutex);
1249 bzero(avl, sizeof (smb_avl_t));
1250 }
1251
1252 /*
1253 * Adds the given item to the AVL if it's
1254 * not already there.
1255 *
1256 * Returns:
1257 *
1258 * ENOTACTIVE AVL is not in READY state
1259 * EEXIST The item is already in AVL
1260 */
1261 int
smb_avl_add(smb_avl_t * avl,void * item)1262 smb_avl_add(smb_avl_t *avl, void *item)
1263 {
1264 avl_index_t where;
1265
1266 ASSERT(avl);
1267 ASSERT(item);
1268
1269 if (!smb_avl_hold(avl))
1270 return (ENOTACTIVE);
1271
1272 rw_enter(&avl->avl_lock, RW_WRITER);
1273 if (avl_find(&avl->avl_tree, item, &where) != NULL) {
1274 rw_exit(&avl->avl_lock);
1275 smb_avl_rele(avl);
1276 return (EEXIST);
1277 }
1278
1279 avl_insert(&avl->avl_tree, item, where);
1280 avl->avl_sequence++;
1281 rw_exit(&avl->avl_lock);
1282
1283 smb_avl_rele(avl);
1284 return (0);
1285 }
1286
1287 /*
1288 * Removes the given item from the AVL.
1289 * If no reference is left on the item
1290 * it will also be destroyed by calling the
1291 * registered destroy operation.
1292 */
1293 void
smb_avl_remove(smb_avl_t * avl,void * item)1294 smb_avl_remove(smb_avl_t *avl, void *item)
1295 {
1296 avl_index_t where;
1297 void *rm_item;
1298
1299 ASSERT(avl);
1300 ASSERT(item);
1301
1302 if (!smb_avl_hold(avl))
1303 return;
1304
1305 rw_enter(&avl->avl_lock, RW_WRITER);
1306 if ((rm_item = avl_find(&avl->avl_tree, item, &where)) == NULL) {
1307 rw_exit(&avl->avl_lock);
1308 smb_avl_rele(avl);
1309 return;
1310 }
1311
1312 avl_remove(&avl->avl_tree, rm_item);
1313 if (avl->avl_nops->avln_rele(rm_item))
1314 avl->avl_nops->avln_destroy(rm_item);
1315 avl->avl_sequence++;
1316 rw_exit(&avl->avl_lock);
1317
1318 smb_avl_rele(avl);
1319 }
1320
1321 /*
1322 * Looks up the AVL for the given item.
1323 * If the item is found a hold on the object
1324 * is taken before the pointer to it is
1325 * returned to the caller. The caller MUST
1326 * always call smb_avl_release() after it's done
1327 * using the returned object to release the hold
1328 * taken on the object.
1329 */
1330 void *
smb_avl_lookup(smb_avl_t * avl,void * item)1331 smb_avl_lookup(smb_avl_t *avl, void *item)
1332 {
1333 void *node = NULL;
1334
1335 ASSERT(avl);
1336 ASSERT(item);
1337
1338 if (!smb_avl_hold(avl))
1339 return (NULL);
1340
1341 rw_enter(&avl->avl_lock, RW_READER);
1342 node = avl_find(&avl->avl_tree, item, NULL);
1343 if (node != NULL)
1344 avl->avl_nops->avln_hold(node);
1345 rw_exit(&avl->avl_lock);
1346
1347 if (node == NULL)
1348 smb_avl_rele(avl);
1349
1350 return (node);
1351 }
1352
1353 /*
1354 * The hold on the given object is released.
1355 * This function MUST always be called after
1356 * smb_avl_lookup() and smb_avl_iterate() for
1357 * the returned object.
1358 *
1359 * If AVL is in DESTROYING state, the destroying
1360 * thread will be notified.
1361 */
1362 void
smb_avl_release(smb_avl_t * avl,void * item)1363 smb_avl_release(smb_avl_t *avl, void *item)
1364 {
1365 ASSERT(avl);
1366 ASSERT(item);
1367
1368 if (avl->avl_nops->avln_rele(item))
1369 avl->avl_nops->avln_destroy(item);
1370
1371 smb_avl_rele(avl);
1372 }
1373
1374 /*
1375 * Initializes the given cursor for the AVL.
1376 * The cursor will be used to iterate through the AVL
1377 */
1378 void
smb_avl_iterinit(smb_avl_t * avl,smb_avl_cursor_t * cursor)1379 smb_avl_iterinit(smb_avl_t *avl, smb_avl_cursor_t *cursor)
1380 {
1381 ASSERT(avl);
1382 ASSERT(cursor);
1383
1384 cursor->avlc_next = NULL;
1385 cursor->avlc_sequence = avl->avl_sequence;
1386 }
1387
1388 /*
1389 * Iterates through the AVL using the given cursor.
1390 * It always starts at the beginning and then returns
1391 * a pointer to the next object on each subsequent call.
1392 *
1393 * If a new object is added to or removed from the AVL
1394 * between two calls to this function, the iteration
1395 * will terminate prematurely.
1396 *
1397 * The caller MUST always call smb_avl_release() after it's
1398 * done using the returned object to release the hold taken
1399 * on the object.
1400 */
1401 void *
smb_avl_iterate(smb_avl_t * avl,smb_avl_cursor_t * cursor)1402 smb_avl_iterate(smb_avl_t *avl, smb_avl_cursor_t *cursor)
1403 {
1404 void *node;
1405
1406 ASSERT(avl);
1407 ASSERT(cursor);
1408
1409 if (!smb_avl_hold(avl))
1410 return (NULL);
1411
1412 rw_enter(&avl->avl_lock, RW_READER);
1413 if (cursor->avlc_sequence != avl->avl_sequence) {
1414 rw_exit(&avl->avl_lock);
1415 smb_avl_rele(avl);
1416 return (NULL);
1417 }
1418
1419 if (cursor->avlc_next == NULL)
1420 node = avl_first(&avl->avl_tree);
1421 else
1422 node = AVL_NEXT(&avl->avl_tree, cursor->avlc_next);
1423
1424 if (node != NULL)
1425 avl->avl_nops->avln_hold(node);
1426
1427 cursor->avlc_next = node;
1428 rw_exit(&avl->avl_lock);
1429
1430 if (node == NULL)
1431 smb_avl_rele(avl);
1432
1433 return (node);
1434 }
1435
1436 /*
1437 * Increments the AVL reference count in order to
1438 * prevent the avl from being destroyed while it's
1439 * being accessed.
1440 */
1441 static boolean_t
smb_avl_hold(smb_avl_t * avl)1442 smb_avl_hold(smb_avl_t *avl)
1443 {
1444 mutex_enter(&avl->avl_mutex);
1445 if (avl->avl_state != SMB_AVL_STATE_READY) {
1446 mutex_exit(&avl->avl_mutex);
1447 return (B_FALSE);
1448 }
1449 avl->avl_refcnt++;
1450 mutex_exit(&avl->avl_mutex);
1451
1452 return (B_TRUE);
1453 }
1454
1455 /*
1456 * Decrements the AVL reference count to release the
1457 * hold. If another thread is trying to destroy the
1458 * AVL and is waiting for the reference count to become
1459 * 0, it is signaled to wake up.
1460 */
1461 static void
smb_avl_rele(smb_avl_t * avl)1462 smb_avl_rele(smb_avl_t *avl)
1463 {
1464 mutex_enter(&avl->avl_mutex);
1465 ASSERT(avl->avl_refcnt > 0);
1466 avl->avl_refcnt--;
1467 if (avl->avl_state == SMB_AVL_STATE_DESTROYING)
1468 cv_broadcast(&avl->avl_cv);
1469 mutex_exit(&avl->avl_mutex);
1470 }
1471
1472 /*
1473 * smb_latency_init
1474 */
1475 void
smb_latency_init(smb_latency_t * lat)1476 smb_latency_init(smb_latency_t *lat)
1477 {
1478 bzero(lat, sizeof (*lat));
1479 mutex_init(&lat->ly_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
1480 }
1481
1482 /*
1483 * smb_latency_destroy
1484 */
1485 void
smb_latency_destroy(smb_latency_t * lat)1486 smb_latency_destroy(smb_latency_t *lat)
1487 {
1488 mutex_destroy(&lat->ly_mutex);
1489 }
1490
1491 /*
1492 * smb_latency_add_sample
1493 *
1494 * Uses the new sample to calculate the new mean and standard deviation. The
1495 * sample must be a scaled value.
1496 */
1497 void
smb_latency_add_sample(smb_latency_t * lat,hrtime_t sample)1498 smb_latency_add_sample(smb_latency_t *lat, hrtime_t sample)
1499 {
1500 hrtime_t a_mean;
1501 hrtime_t d_mean;
1502
1503 mutex_enter(&lat->ly_mutex);
1504 lat->ly_a_nreq++;
1505 lat->ly_a_sum += sample;
1506 if (lat->ly_a_nreq != 0) {
1507 a_mean = lat->ly_a_sum / lat->ly_a_nreq;
1508 lat->ly_a_stddev =
1509 (sample - a_mean) * (sample - lat->ly_a_mean);
1510 lat->ly_a_mean = a_mean;
1511 }
1512 lat->ly_d_nreq++;
1513 lat->ly_d_sum += sample;
1514 if (lat->ly_d_nreq != 0) {
1515 d_mean = lat->ly_d_sum / lat->ly_d_nreq;
1516 lat->ly_d_stddev =
1517 (sample - d_mean) * (sample - lat->ly_d_mean);
1518 lat->ly_d_mean = d_mean;
1519 }
1520 mutex_exit(&lat->ly_mutex);
1521 }
1522
1523 /*
1524 * smb_srqueue_init
1525 */
1526 void
smb_srqueue_init(smb_srqueue_t * srq)1527 smb_srqueue_init(smb_srqueue_t *srq)
1528 {
1529 bzero(srq, sizeof (*srq));
1530 mutex_init(&srq->srq_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
1531 srq->srq_wlastupdate = srq->srq_rlastupdate = gethrtime_unscaled();
1532 }
1533
1534 /*
1535 * smb_srqueue_destroy
1536 */
1537 void
smb_srqueue_destroy(smb_srqueue_t * srq)1538 smb_srqueue_destroy(smb_srqueue_t *srq)
1539 {
1540 mutex_destroy(&srq->srq_mutex);
1541 }
1542
1543 /*
1544 * smb_srqueue_waitq_enter
1545 */
1546 void
smb_srqueue_waitq_enter(smb_srqueue_t * srq)1547 smb_srqueue_waitq_enter(smb_srqueue_t *srq)
1548 {
1549 hrtime_t new;
1550 hrtime_t delta;
1551 uint32_t wcnt;
1552
1553 mutex_enter(&srq->srq_mutex);
1554 new = gethrtime_unscaled();
1555 delta = new - srq->srq_wlastupdate;
1556 srq->srq_wlastupdate = new;
1557 wcnt = srq->srq_wcnt++;
1558 if (wcnt != 0) {
1559 srq->srq_wlentime += delta * wcnt;
1560 srq->srq_wtime += delta;
1561 }
1562 mutex_exit(&srq->srq_mutex);
1563 }
1564
1565 /*
1566 * smb_srqueue_runq_exit
1567 */
1568 void
smb_srqueue_runq_exit(smb_srqueue_t * srq)1569 smb_srqueue_runq_exit(smb_srqueue_t *srq)
1570 {
1571 hrtime_t new;
1572 hrtime_t delta;
1573 uint32_t rcnt;
1574
1575 mutex_enter(&srq->srq_mutex);
1576 new = gethrtime_unscaled();
1577 delta = new - srq->srq_rlastupdate;
1578 srq->srq_rlastupdate = new;
1579 rcnt = srq->srq_rcnt--;
1580 ASSERT(rcnt > 0);
1581 srq->srq_rlentime += delta * rcnt;
1582 srq->srq_rtime += delta;
1583 mutex_exit(&srq->srq_mutex);
1584 }
1585
1586 /*
1587 * smb_srqueue_waitq_to_runq
1588 */
1589 void
smb_srqueue_waitq_to_runq(smb_srqueue_t * srq)1590 smb_srqueue_waitq_to_runq(smb_srqueue_t *srq)
1591 {
1592 hrtime_t new;
1593 hrtime_t delta;
1594 uint32_t wcnt;
1595 uint32_t rcnt;
1596
1597 mutex_enter(&srq->srq_mutex);
1598 new = gethrtime_unscaled();
1599 delta = new - srq->srq_wlastupdate;
1600 srq->srq_wlastupdate = new;
1601 wcnt = srq->srq_wcnt--;
1602 ASSERT(wcnt > 0);
1603 srq->srq_wlentime += delta * wcnt;
1604 srq->srq_wtime += delta;
1605 delta = new - srq->srq_rlastupdate;
1606 srq->srq_rlastupdate = new;
1607 rcnt = srq->srq_rcnt++;
1608 if (rcnt != 0) {
1609 srq->srq_rlentime += delta * rcnt;
1610 srq->srq_rtime += delta;
1611 }
1612 mutex_exit(&srq->srq_mutex);
1613 }
1614
1615 /*
1616 * smb_srqueue_update
1617 *
1618 * Takes a snapshot of the smb_sr_stat_t structure passed in.
1619 */
1620 void
smb_srqueue_update(smb_srqueue_t * srq,smb_kstat_utilization_t * kd)1621 smb_srqueue_update(smb_srqueue_t *srq, smb_kstat_utilization_t *kd)
1622 {
1623 hrtime_t delta;
1624 hrtime_t snaptime;
1625
1626 mutex_enter(&srq->srq_mutex);
1627 snaptime = gethrtime_unscaled();
1628 delta = snaptime - srq->srq_wlastupdate;
1629 srq->srq_wlastupdate = snaptime;
1630 if (srq->srq_wcnt != 0) {
1631 srq->srq_wlentime += delta * srq->srq_wcnt;
1632 srq->srq_wtime += delta;
1633 }
1634 delta = snaptime - srq->srq_rlastupdate;
1635 srq->srq_rlastupdate = snaptime;
1636 if (srq->srq_rcnt != 0) {
1637 srq->srq_rlentime += delta * srq->srq_rcnt;
1638 srq->srq_rtime += delta;
1639 }
1640 kd->ku_rlentime = srq->srq_rlentime;
1641 kd->ku_rtime = srq->srq_rtime;
1642 kd->ku_wlentime = srq->srq_wlentime;
1643 kd->ku_wtime = srq->srq_wtime;
1644 mutex_exit(&srq->srq_mutex);
1645 scalehrtime(&kd->ku_rlentime);
1646 scalehrtime(&kd->ku_rtime);
1647 scalehrtime(&kd->ku_wlentime);
1648 scalehrtime(&kd->ku_wtime);
1649 }
1650
/*
 * Initializes a command threshold: at most "threshold" concurrent
 * users (0 means disabled/canceled); waiters block up to "timeout"
 * milliseconds in smb_threshold_enter().
 * The cmd string is retained by reference, not copied.
 */
void
smb_threshold_init(smb_cmd_threshold_t *ct, char *cmd,
    uint_t threshold, uint_t timeout)
{
	bzero(ct, sizeof (smb_cmd_threshold_t));
	mutex_init(&ct->ct_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ct->ct_cond, NULL, CV_DEFAULT, NULL);

	ct->ct_cmd = cmd;
	ct->ct_threshold = threshold;
	ct->ct_timeout = timeout;
}
1663
/*
 * Tears down a command threshold. Callers should have woken any
 * waiters first (see smb_threshold_wake_all()).
 */
void
smb_threshold_fini(smb_cmd_threshold_t *ct)
{
	cv_destroy(&ct->ct_cond);
	mutex_destroy(&ct->ct_mutex);
}
1670
1671 /*
1672 * This threshold mechanism is used to limit the number of simultaneous
1673 * named pipe connections, concurrent authentication conversations, etc.
1674 * Requests that would take us over the threshold wait until either the
1675 * resources are available (return zero) or timeout (return error).
1676 */
1677 int
smb_threshold_enter(smb_cmd_threshold_t * ct)1678 smb_threshold_enter(smb_cmd_threshold_t *ct)
1679 {
1680 clock_t time, rem;
1681
1682 time = MSEC_TO_TICK(ct->ct_timeout) + ddi_get_lbolt();
1683 mutex_enter(&ct->ct_mutex);
1684
1685 while (ct->ct_threshold != 0 &&
1686 ct->ct_threshold <= ct->ct_active_cnt) {
1687 ct->ct_blocked_cnt++;
1688 rem = cv_timedwait(&ct->ct_cond, &ct->ct_mutex, time);
1689 ct->ct_blocked_cnt--;
1690 if (rem < 0) {
1691 mutex_exit(&ct->ct_mutex);
1692 return (ETIME);
1693 }
1694 }
1695 if (ct->ct_threshold == 0) {
1696 mutex_exit(&ct->ct_mutex);
1697 return (ECANCELED);
1698 }
1699
1700 ASSERT3U(ct->ct_active_cnt, <, ct->ct_threshold);
1701 ct->ct_active_cnt++;
1702
1703 mutex_exit(&ct->ct_mutex);
1704 return (0);
1705 }
1706
/*
 * Releases a slot acquired by smb_threshold_enter() and wakes one
 * blocked waiter, if any.
 */
void
smb_threshold_exit(smb_cmd_threshold_t *ct)
{
	mutex_enter(&ct->ct_mutex);
	ASSERT3U(ct->ct_active_cnt, >, 0);
	ct->ct_active_cnt--;
	/* One slot freed, so one waiter (at most) can proceed. */
	if (ct->ct_blocked_cnt)
		cv_signal(&ct->ct_cond);
	mutex_exit(&ct->ct_mutex);
}
1717
/*
 * Disables the threshold (ct_threshold = 0) and wakes all blocked
 * waiters; they will return ECANCELED from smb_threshold_enter().
 * Used during teardown.
 */
void
smb_threshold_wake_all(smb_cmd_threshold_t *ct)
{
	mutex_enter(&ct->ct_mutex);
	ct->ct_threshold = 0;
	cv_broadcast(&ct->ct_cond);
	mutex_exit(&ct->ct_mutex);
}
1726