1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright 2013 Nexenta Systems, Inc. All rights reserved. 
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/tzfile.h>
#include <sys/atomic.h>
#include <sys/time.h>
#include <sys/spl.h>
#include <sys/random.h>
#include <smbsrv/smb_kproto.h>
#include <smbsrv/smb_fsops.h>
#include <smbsrv/smbinfo.h>
#include <smbsrv/smb_xdr.h>
#include <smbsrv/smb_vops.h>
#include <smbsrv/smb_idmap.h>

#include <sys/sid.h>
#include <sys/priv_names.h>

/* Object cache for entries on the locked-list (llist) delete queue. */
static kmem_cache_t *smb_dtor_cache = NULL;

static boolean_t smb_avl_hold(smb_avl_t *);
static void smb_avl_rele(smb_avl_t *);

/*
 * Accumulated leap-second count applied by smb_gmtime_r()/smb_timegm()
 * below.  Zero unless explicitly set elsewhere.
 */
time_t tzh_leapcnt = 0;

struct tm
*smb_gmtime_r(time_t *clock, struct tm *result);

time_t
smb_timegm(struct tm *tm);

/*
 * Private broken-down time representation; the kernel has no <time.h>
 * struct tm.  All times handled here are GMT, so there are no
 * tm_gmtoff/tm_zone fields.
 */
struct tm {
	int	tm_sec;
	int	tm_min;
	int	tm_hour;
	int	tm_mday;
	int	tm_mon;
	int	tm_year;
	int	tm_wday;
	int	tm_yday;
	int	tm_isdst;
};

/* Days per month for a non-leap year; February is adjusted separately. */
static const int days_in_month[] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * Returns the on-the-wire length of str for this request: the wide-char
 * equivalent length if the client negotiated Unicode, otherwise the
 * byte length.  No null terminator is included.
 */
int
smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (smb_wcequiv_strlen(str));
	return (strlen(str));
}

/*
 * As smb_ascii_or_unicode_strlen(), but includes the null terminator
 * (2 bytes for Unicode, 1 byte for ASCII).
 */
int
smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (smb_wcequiv_strlen(str) + 2);
	return (strlen(str) + 1);
}

/*
 * Returns the size in bytes of a null terminator for this request:
 * 2 for Unicode, 1 for ASCII.
 */
int
smb_ascii_or_unicode_null_len(struct smb_request *sr)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (2);
	return (1);
}

/*
 *
 * Convert old-style (DOS, LanMan) wildcard strings to NT style.
 * This should ONLY happen to patterns that come from old clients,
 * meaning dialect LANMAN2_1 etc. (dialect < NT_LM_0_12).
 *
 *	? is converted to >
 *	* is converted to < if it is followed by .
 *	. is converted to " if it is followed by ? or * or end of pattern
 *
 * Note: modifies pattern in place.
 */
void
smb_convert_wildcards(char *pattern)
{
	char	*p;

	for (p = pattern; *p != '\0'; p++) {
		switch (*p) {
		case '?':
			*p = '>';
			break;
		case '*':
			if (p[1] == '.')
				*p = '<';
			break;
		case '.':
			/* '.' at end of pattern also becomes '"' */
			if (p[1] == '?' || p[1] == '*' || p[1] == '\0')
				*p = '\"';
			break;
		}
	}
}

/*
 * smb_sattr_check
 *
 * Check file attributes against a search attribute (sattr) mask.
 *
 * Normal files, which includes READONLY and ARCHIVE, always pass
 * this check.  If the DIRECTORY, HIDDEN or SYSTEM special attributes
 * are set then they must appear in the search mask.  The special
 * attributes are inclusive, i.e. all special attributes that appear
 * in sattr must also appear in the file attributes for the check to
 * pass.
 *
 * The following examples show how this works:
 *
 *		fileA:	READONLY
 *		fileB:	0 (no attributes = normal file)
 *		fileC:	READONLY, ARCHIVE
 *		fileD:	HIDDEN
 *		fileE:	READONLY, HIDDEN, SYSTEM
 *		dirA:	DIRECTORY
 *
 * search attribute: 0
 *		Returns: fileA, fileB and fileC.
 * search attribute: HIDDEN
 *		Returns: fileA, fileB, fileC and fileD.
 * search attribute: SYSTEM
 *		Returns: fileA, fileB and fileC.
 * search attribute: DIRECTORY
 *		Returns: fileA, fileB, fileC and dirA.
 * search attribute: HIDDEN and SYSTEM
 *		Returns: fileA, fileB, fileC, fileD and fileE.
 *
 * Returns true if the file and sattr match; otherwise, returns false.
164 */ 165 boolean_t 166 smb_sattr_check(uint16_t dosattr, uint16_t sattr) 167 { 168 if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) && 169 !(sattr & FILE_ATTRIBUTE_DIRECTORY)) 170 return (B_FALSE); 171 172 if ((dosattr & FILE_ATTRIBUTE_HIDDEN) && 173 !(sattr & FILE_ATTRIBUTE_HIDDEN)) 174 return (B_FALSE); 175 176 if ((dosattr & FILE_ATTRIBUTE_SYSTEM) && 177 !(sattr & FILE_ATTRIBUTE_SYSTEM)) 178 return (B_FALSE); 179 180 return (B_TRUE); 181 } 182 183 int 184 microtime(timestruc_t *tvp) 185 { 186 tvp->tv_sec = gethrestime_sec(); 187 tvp->tv_nsec = 0; 188 return (0); 189 } 190 191 int32_t 192 clock_get_milli_uptime() 193 { 194 return (TICK_TO_MSEC(ddi_get_lbolt())); 195 } 196 197 /* 198 * smb_idpool_increment 199 * 200 * This function increments the ID pool by doubling the current size. This 201 * function assumes the caller entered the mutex of the pool. 202 */ 203 static int 204 smb_idpool_increment( 205 smb_idpool_t *pool) 206 { 207 uint8_t *new_pool; 208 uint32_t new_size; 209 210 ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC); 211 212 new_size = pool->id_size * 2; 213 if (new_size <= SMB_IDPOOL_MAX_SIZE) { 214 new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP); 215 if (new_pool) { 216 bzero(new_pool, new_size / 8); 217 bcopy(pool->id_pool, new_pool, pool->id_size / 8); 218 kmem_free(pool->id_pool, pool->id_size / 8); 219 pool->id_pool = new_pool; 220 pool->id_free_counter += new_size - pool->id_size; 221 pool->id_max_free_counter += new_size - pool->id_size; 222 pool->id_size = new_size; 223 pool->id_idx_msk = (new_size / 8) - 1; 224 if (new_size >= SMB_IDPOOL_MAX_SIZE) { 225 /* id -1 made unavailable */ 226 pool->id_pool[pool->id_idx_msk] = 0x80; 227 pool->id_free_counter--; 228 pool->id_max_free_counter--; 229 } 230 return (0); 231 } 232 } 233 return (-1); 234 } 235 236 /* 237 * smb_idpool_constructor 238 * 239 * This function initializes the pool structure provided. 
240 */ 241 int 242 smb_idpool_constructor( 243 smb_idpool_t *pool) 244 { 245 246 ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC); 247 248 pool->id_size = SMB_IDPOOL_MIN_SIZE; 249 pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1; 250 pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1; 251 pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1; 252 pool->id_bit = 0x02; 253 pool->id_bit_idx = 1; 254 pool->id_idx = 0; 255 pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8), 256 KM_SLEEP); 257 bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8)); 258 /* -1 id made unavailable */ 259 pool->id_pool[0] = 0x01; /* id 0 made unavailable */ 260 mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL); 261 pool->id_magic = SMB_IDPOOL_MAGIC; 262 return (0); 263 } 264 265 /* 266 * smb_idpool_destructor 267 * 268 * This function tears down and frees the resources associated with the 269 * pool provided. 270 */ 271 void 272 smb_idpool_destructor( 273 smb_idpool_t *pool) 274 { 275 ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC); 276 ASSERT(pool->id_free_counter == pool->id_max_free_counter); 277 pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC; 278 mutex_destroy(&pool->id_mutex); 279 kmem_free(pool->id_pool, (size_t)(pool->id_size / 8)); 280 } 281 282 /* 283 * smb_idpool_alloc 284 * 285 * This function allocates an ID from the pool provided. 
 */
int
smb_idpool_alloc(
    smb_idpool_t	*pool,
    uint16_t		*id)
{
	uint32_t	i;
	uint8_t		bit;
	uint8_t		bit_idx;
	uint8_t		byte;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	mutex_enter(&pool->id_mutex);
	/* Grow the bitmap if exhausted; fails at SMB_IDPOOL_MAX_SIZE. */
	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
		mutex_exit(&pool->id_mutex);
		return (-1);
	}

	/*
	 * Scan the bitmap for a clear bit, resuming from the position
	 * remembered from the previous allocation (id_idx/id_bit).
	 * The loop bound guarantees termination even though a free id
	 * is known to exist at this point.
	 */
	i = pool->id_size;
	while (i) {
		bit = pool->id_bit;
		bit_idx = pool->id_bit_idx;
		byte = pool->id_pool[pool->id_idx];
		while (bit) {
			if (byte & bit) {
				/* This id is in use; try the next bit. */
				bit = bit << 1;
				bit_idx++;
				continue;
			}
			/* Free id found: mark it used and hand it out. */
			pool->id_pool[pool->id_idx] |= bit;
			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
			pool->id_free_counter--;
			/* Remember where to resume the next search. */
			pool->id_bit = bit;
			pool->id_bit_idx = bit_idx;
			mutex_exit(&pool->id_mutex);
			return (0);
		}
		/* Byte exhausted; wrap to the next byte of the bitmap. */
		pool->id_bit = 1;
		pool->id_bit_idx = 0;
		pool->id_idx++;
		pool->id_idx &= pool->id_idx_msk;
		--i;
	}
	/*
	 * This section of code shouldn't be reached. If there are IDs
	 * available and none could be found there's a problem.
	 */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
	return (-1);
}

/*
 * smb_idpool_free
 *
 * This function frees the ID provided.  Ids 0 and 0xFFFF are reserved
 * and must never be freed.
 */
void
smb_idpool_free(
    smb_idpool_t	*pool,
    uint16_t		id)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(id != 0);
	ASSERT(id != 0xFFFF);

	mutex_enter(&pool->id_mutex);
	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
		/* Clear the bit and return the id to the free count. */
		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
		pool->id_free_counter++;
		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
		mutex_exit(&pool->id_mutex);
		return;
	}
	/* Freeing a free ID. */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
}

/*
 * Initialize the llist delete queue object cache.
368 */ 369 void 370 smb_llist_init(void) 371 { 372 if (smb_dtor_cache != NULL) 373 return; 374 375 smb_dtor_cache = kmem_cache_create("smb_dtor_cache", 376 sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0); 377 } 378 379 /* 380 * Destroy the llist delete queue object cache. 381 */ 382 void 383 smb_llist_fini(void) 384 { 385 if (smb_dtor_cache != NULL) { 386 kmem_cache_destroy(smb_dtor_cache); 387 smb_dtor_cache = NULL; 388 } 389 } 390 391 /* 392 * smb_llist_constructor 393 * 394 * This function initializes a locked list. 395 */ 396 void 397 smb_llist_constructor( 398 smb_llist_t *ll, 399 size_t size, 400 size_t offset) 401 { 402 rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL); 403 mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL); 404 list_create(&ll->ll_list, size, offset); 405 list_create(&ll->ll_deleteq, sizeof (smb_dtor_t), 406 offsetof(smb_dtor_t, dt_lnd)); 407 ll->ll_count = 0; 408 ll->ll_wrop = 0; 409 ll->ll_deleteq_count = 0; 410 ll->ll_flushing = B_FALSE; 411 } 412 413 /* 414 * Flush the delete queue and destroy a locked list. 415 */ 416 void 417 smb_llist_destructor( 418 smb_llist_t *ll) 419 { 420 smb_llist_flush(ll); 421 422 ASSERT(ll->ll_count == 0); 423 ASSERT(ll->ll_deleteq_count == 0); 424 425 rw_destroy(&ll->ll_lock); 426 list_destroy(&ll->ll_list); 427 list_destroy(&ll->ll_deleteq); 428 mutex_destroy(&ll->ll_mutex); 429 } 430 431 /* 432 * Post an object to the delete queue. The delete queue will be processed 433 * during list exit or list destruction. Objects are often posted for 434 * deletion during list iteration (while the list is locked) but that is 435 * not required, and an object can be posted at any time. 
 */
void
smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
{
	smb_dtor_t	*dtor;

	ASSERT((object != NULL) && (dtorproc != NULL));

	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
	bzero(dtor, sizeof (smb_dtor_t));
	dtor->dt_magic = SMB_DTOR_MAGIC;
	dtor->dt_object = object;
	dtor->dt_proc = dtorproc;

	mutex_enter(&ll->ll_mutex);
	list_insert_tail(&ll->ll_deleteq, dtor);
	++ll->ll_deleteq_count;
	mutex_exit(&ll->ll_mutex);
}

/*
 * Exit the list lock and process the delete queue.
 */
void
smb_llist_exit(smb_llist_t *ll)
{
	rw_exit(&ll->ll_lock);
	smb_llist_flush(ll);
}

/*
 * Flush the list delete queue.  The mutex is dropped across the destructor
 * call in case this leads to additional objects being posted to the delete
 * queue.
 */
void
smb_llist_flush(smb_llist_t *ll)
{
	smb_dtor_t    *dtor;

	mutex_enter(&ll->ll_mutex);
	/* Only one thread flushes at a time; others simply return. */
	if (ll->ll_flushing) {
		mutex_exit(&ll->ll_mutex);
		return;
	}
	ll->ll_flushing = B_TRUE;

	dtor = list_head(&ll->ll_deleteq);
	while (dtor != NULL) {
		SMB_DTOR_VALID(dtor);
		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
		list_remove(&ll->ll_deleteq, dtor);
		--ll->ll_deleteq_count;
		mutex_exit(&ll->ll_mutex);

		/* Run the destructor without holding the mutex. */
		dtor->dt_proc(dtor->dt_object);

		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
		kmem_cache_free(smb_dtor_cache, dtor);
		mutex_enter(&ll->ll_mutex);
		/* Re-read the head: new entries may have been posted. */
		dtor = list_head(&ll->ll_deleteq);
	}
	ll->ll_flushing = B_FALSE;

	mutex_exit(&ll->ll_mutex);
}

/*
 * smb_llist_upgrade
 *
 * This function tries to upgrade the lock of the locked list. It assumes the
 * lock has already been entered in RW_READER mode. It first tries using the
 * Solaris function rw_tryupgrade(). If that call fails the lock is released
 * and reentered in RW_WRITER mode.
In that last case a window is opened during 510 * which the contents of the list may have changed. The return code indicates 511 * whether or not the list was modified when the lock was exited. 512 */ 513 int smb_llist_upgrade( 514 smb_llist_t *ll) 515 { 516 uint64_t wrop; 517 518 if (rw_tryupgrade(&ll->ll_lock) != 0) { 519 return (0); 520 } 521 wrop = ll->ll_wrop; 522 rw_exit(&ll->ll_lock); 523 rw_enter(&ll->ll_lock, RW_WRITER); 524 return (wrop != ll->ll_wrop); 525 } 526 527 /* 528 * smb_llist_insert_head 529 * 530 * This function inserts the object passed a the beginning of the list. This 531 * function assumes the lock of the list has already been entered. 532 */ 533 void 534 smb_llist_insert_head( 535 smb_llist_t *ll, 536 void *obj) 537 { 538 list_insert_head(&ll->ll_list, obj); 539 ++ll->ll_wrop; 540 ++ll->ll_count; 541 } 542 543 /* 544 * smb_llist_insert_tail 545 * 546 * This function appends to the object passed to the list. This function assumes 547 * the lock of the list has already been entered. 548 * 549 */ 550 void 551 smb_llist_insert_tail( 552 smb_llist_t *ll, 553 void *obj) 554 { 555 list_insert_tail(&ll->ll_list, obj); 556 ++ll->ll_wrop; 557 ++ll->ll_count; 558 } 559 560 /* 561 * smb_llist_remove 562 * 563 * This function removes the object passed from the list. This function assumes 564 * the lock of the list has already been entered. 565 */ 566 void 567 smb_llist_remove( 568 smb_llist_t *ll, 569 void *obj) 570 { 571 list_remove(&ll->ll_list, obj); 572 ++ll->ll_wrop; 573 --ll->ll_count; 574 } 575 576 /* 577 * smb_llist_get_count 578 * 579 * This function returns the number of elements in the specified list. 580 */ 581 uint32_t 582 smb_llist_get_count( 583 smb_llist_t *ll) 584 { 585 return (ll->ll_count); 586 } 587 588 /* 589 * smb_slist_constructor 590 * 591 * Synchronized list constructor. 
592 */ 593 void 594 smb_slist_constructor( 595 smb_slist_t *sl, 596 size_t size, 597 size_t offset) 598 { 599 mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL); 600 cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL); 601 list_create(&sl->sl_list, size, offset); 602 sl->sl_count = 0; 603 sl->sl_waiting = B_FALSE; 604 } 605 606 /* 607 * smb_slist_destructor 608 * 609 * Synchronized list destructor. 610 */ 611 void 612 smb_slist_destructor( 613 smb_slist_t *sl) 614 { 615 VERIFY(sl->sl_count == 0); 616 617 mutex_destroy(&sl->sl_mutex); 618 cv_destroy(&sl->sl_cv); 619 list_destroy(&sl->sl_list); 620 } 621 622 /* 623 * smb_slist_insert_head 624 * 625 * This function inserts the object passed a the beginning of the list. 626 */ 627 void 628 smb_slist_insert_head( 629 smb_slist_t *sl, 630 void *obj) 631 { 632 mutex_enter(&sl->sl_mutex); 633 list_insert_head(&sl->sl_list, obj); 634 ++sl->sl_count; 635 mutex_exit(&sl->sl_mutex); 636 } 637 638 /* 639 * smb_slist_insert_tail 640 * 641 * This function appends the object passed to the list. 642 */ 643 void 644 smb_slist_insert_tail( 645 smb_slist_t *sl, 646 void *obj) 647 { 648 mutex_enter(&sl->sl_mutex); 649 list_insert_tail(&sl->sl_list, obj); 650 ++sl->sl_count; 651 mutex_exit(&sl->sl_mutex); 652 } 653 654 /* 655 * smb_llist_remove 656 * 657 * This function removes the object passed by the caller from the list. 658 */ 659 void 660 smb_slist_remove( 661 smb_slist_t *sl, 662 void *obj) 663 { 664 mutex_enter(&sl->sl_mutex); 665 list_remove(&sl->sl_list, obj); 666 if ((--sl->sl_count == 0) && (sl->sl_waiting)) { 667 sl->sl_waiting = B_FALSE; 668 cv_broadcast(&sl->sl_cv); 669 } 670 mutex_exit(&sl->sl_mutex); 671 } 672 673 /* 674 * smb_slist_move_tail 675 * 676 * This function transfers all the contents of the synchronized list to the 677 * list_t provided. It returns the number of objects transferred. 
 */
uint32_t
smb_slist_move_tail(
    list_t		*lst,
    smb_slist_t		*sl)
{
	uint32_t	rv;

	mutex_enter(&sl->sl_mutex);
	rv = sl->sl_count;
	if (sl->sl_count) {
		list_move_tail(lst, &sl->sl_list);
		sl->sl_count = 0;
		/* List is now empty: wake any drain waiters. */
		if (sl->sl_waiting) {
			sl->sl_waiting = B_FALSE;
			cv_broadcast(&sl->sl_cv);
		}
	}
	mutex_exit(&sl->sl_mutex);
	return (rv);
}

/*
 * smb_slist_obj_move
 *
 * This function moves an object from one list to the end of the other list. It
 * assumes the mutex of each list has been entered.
 */
void
smb_slist_obj_move(
    smb_slist_t		*dst,
    smb_slist_t		*src,
    void		*obj)
{
	/* Both lists must link objects at the same offset/size. */
	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);

	list_remove(&src->sl_list, obj);
	list_insert_tail(&dst->sl_list, obj);
	dst->sl_count++;
	src->sl_count--;
	/* Wake drain waiters if the source list just emptied. */
	if ((src->sl_count == 0) && (src->sl_waiting)) {
		src->sl_waiting = B_FALSE;
		cv_broadcast(&src->sl_cv);
	}
}

/*
 * smb_slist_wait_for_empty
 *
 * This function waits for a list to be emptied.
 */
void
smb_slist_wait_for_empty(
    smb_slist_t		*sl)
{
	mutex_enter(&sl->sl_mutex);
	while (sl->sl_count) {
		sl->sl_waiting = B_TRUE;
		cv_wait(&sl->sl_cv, &sl->sl_mutex);
	}
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_exit
 *
 * This function exits the mutex of the list and signals the condition
 * variable if the list is empty.  The caller must hold sl_mutex.
 */
void
smb_slist_exit(smb_slist_t *sl)
{
	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}

/* smb_thread_
moved to smb_thread.c */

/*
 * smb_rwx_init
 *
 * Initializes an smb_rwx: a readers/writer lock augmented with a
 * condition variable so that readers can wait for a writer to exit.
 */
void
smb_rwx_init(
    smb_rwx_t	*rwx)
{
	bzero(rwx, sizeof (smb_rwx_t));
	cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
}

/*
 * smb_rwx_destroy
 *
 * Tears down the mutex, condition variable and rwlock of an smb_rwx.
 */
void
smb_rwx_destroy(
    smb_rwx_t	*rwx)
{
	mutex_destroy(&rwx->rwx_mutex);
	cv_destroy(&rwx->rwx_cv);
	rw_destroy(&rwx->rwx_lock);
}

/*
 * smb_rwx_rwexit
 *
 * Exits the rwlock.  If this thread held it as writer, any thread
 * blocked in smb_rwx_rwwait() is woken before the lock is dropped.
 */
void
smb_rwx_rwexit(
    smb_rwx_t	*rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mutex_enter(&rwx->rwx_mutex);
		if (rwx->rwx_waiting) {
			rwx->rwx_waiting = B_FALSE;
			cv_broadcast(&rwx->rwx_cv);
		}
		mutex_exit(&rwx->rwx_mutex);
	}
	rw_exit(&rwx->rwx_lock);
}

/*
 * smb_rwx_rwupgrade
 *
 * Upgrades the lock to RW_WRITER.  Returns the mode the lock was held
 * in on entry so the caller can restore it with smb_rwx_rwdowngrade().
 * If the opportunistic rw_tryupgrade() fails, the lock is dropped and
 * re-taken as writer, which opens a window where the protected state
 * may have changed.
 */
krw_t
smb_rwx_rwupgrade(
    smb_rwx_t	*rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		return (RW_WRITER);
	}
	if (!rw_tryupgrade(&rwx->rwx_lock)) {
		rw_exit(&rwx->rwx_lock);
		rw_enter(&rwx->rwx_lock, RW_WRITER);
	}
	return (RW_READER);
}

/*
 * smb_rwx_rwdowngrade
 *
 * Restores the lock to the mode returned by smb_rwx_rwupgrade().
 * When actually downgrading, waiters are woken first since the thread
 * is giving up write access.
 */
void
smb_rwx_rwdowngrade(
    smb_rwx_t	*rwx,
    krw_t	mode)
{
	ASSERT(rw_write_held(&rwx->rwx_lock));
	ASSERT(rw_owner(&rwx->rwx_lock) == curthread);

	if (mode == RW_WRITER) {
		/* Lock was already held as writer on upgrade: no-op. */
		return;
	}
	ASSERT(mode == RW_READER);
	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		rwx->rwx_waiting = B_FALSE;
		cv_broadcast(&rwx->rwx_cv);
	}
	mutex_exit(&rwx->rwx_mutex);
	rw_downgrade(&rwx->rwx_lock);
}

/*
 * smb_rwx_wait
 *
 * This function assumes the smb_rwx lock was entered in RW_READER or
 * RW_WRITER mode. It will:
 *
 *	1) release the lock and save its current mode.
 *	2) wait until the condition variable is signaled. This can happen for
 *	   2 reasons: When a writer releases the lock or when the time out (if
 *	   provided) expires.
 *	3) re-acquire the lock in the mode saved in (1).
 *
 * Returns the cv_reltimedwait() result (>0 signaled, -1 timed out) or 1
 * if no wait was performed.
 */
int
smb_rwx_rwwait(
    smb_rwx_t	*rwx,
    clock_t	timeout)
{
	krw_t	mode;
	int	rc = 1;

	mutex_enter(&rwx->rwx_mutex);
	rwx->rwx_waiting = B_TRUE;
	mutex_exit(&rwx->rwx_mutex);

	/* Remember the mode so the lock can be re-taken the same way. */
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mode = RW_WRITER;
	} else {
		ASSERT(rw_read_held(&rwx->rwx_lock));
		mode = RW_READER;
	}
	rw_exit(&rwx->rwx_lock);

	mutex_enter(&rwx->rwx_mutex);
	/* rwx_waiting may already have been cleared by a signaler. */
	if (rwx->rwx_waiting) {
		if (timeout == -1) {
			cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
		} else {
			rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
			    timeout, TR_CLOCK_TICK);
		}
	}
	mutex_exit(&rwx->rwx_mutex);

	rw_enter(&rwx->rwx_lock, mode);
	return (rc);
}

/* smb_idmap_
moved to smb_idmap.c */ 895 896 uint64_t 897 smb_time_unix_to_nt(timestruc_t *unix_time) 898 { 899 uint64_t nt_time; 900 901 if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0)) 902 return (0); 903 904 nt_time = unix_time->tv_sec; 905 nt_time *= 10000000; /* seconds to 100ns */ 906 nt_time += unix_time->tv_nsec / 100; 907 return (nt_time + NT_TIME_BIAS); 908 } 909 910 void 911 smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time) 912 { 913 uint32_t seconds; 914 915 ASSERT(unix_time); 916 917 if ((nt_time == 0) || (nt_time == -1)) { 918 unix_time->tv_sec = 0; 919 unix_time->tv_nsec = 0; 920 return; 921 } 922 923 nt_time -= NT_TIME_BIAS; 924 seconds = nt_time / 10000000; 925 unix_time->tv_sec = seconds; 926 unix_time->tv_nsec = (nt_time % 10000000) * 100; 927 } 928 929 /* 930 * smb_time_gmt_to_local, smb_time_local_to_gmt 931 * 932 * Apply the gmt offset to convert between local time and gmt 933 */ 934 int32_t 935 smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt) 936 { 937 if ((gmt == 0) || (gmt == -1)) 938 return (0); 939 940 return (gmt - sr->sr_gmtoff); 941 } 942 943 int32_t 944 smb_time_local_to_gmt(smb_request_t *sr, int32_t local) 945 { 946 if ((local == 0) || (local == -1)) 947 return (0); 948 949 return (local + sr->sr_gmtoff); 950 } 951 952 953 /* 954 * smb_time_dos_to_unix 955 * 956 * Convert SMB_DATE & SMB_TIME values to a unix timestamp. 957 * 958 * A date/time field of 0 means that that server file system 959 * assigned value need not be changed. The behaviour when the 960 * date/time field is set to -1 is not documented but is 961 * generally treated like 0. 962 * If date or time is 0 or -1 the unix time is returned as 0 963 * so that the caller can identify and handle this special case. 
964 */ 965 int32_t 966 smb_time_dos_to_unix(int16_t date, int16_t time) 967 { 968 struct tm atm; 969 970 if (((date == 0) || (time == 0)) || 971 ((date == -1) || (time == -1))) { 972 return (0); 973 } 974 975 atm.tm_year = ((date >> 9) & 0x3F) + 80; 976 atm.tm_mon = ((date >> 5) & 0x0F) - 1; 977 atm.tm_mday = ((date >> 0) & 0x1F); 978 atm.tm_hour = ((time >> 11) & 0x1F); 979 atm.tm_min = ((time >> 5) & 0x3F); 980 atm.tm_sec = ((time >> 0) & 0x1F) << 1; 981 982 return (smb_timegm(&atm)); 983 } 984 985 void 986 smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p) 987 { 988 struct tm atm; 989 int i; 990 time_t tmp_time; 991 992 if (ux_time == 0) { 993 *date_p = 0; 994 *time_p = 0; 995 return; 996 } 997 998 tmp_time = (time_t)ux_time; 999 (void) smb_gmtime_r(&tmp_time, &atm); 1000 1001 if (date_p) { 1002 i = 0; 1003 i += atm.tm_year - 80; 1004 i <<= 4; 1005 i += atm.tm_mon + 1; 1006 i <<= 5; 1007 i += atm.tm_mday; 1008 1009 *date_p = (short)i; 1010 } 1011 if (time_p) { 1012 i = 0; 1013 i += atm.tm_hour; 1014 i <<= 6; 1015 i += atm.tm_min; 1016 i <<= 5; 1017 i += atm.tm_sec >> 1; 1018 1019 *time_p = (short)i; 1020 } 1021 } 1022 1023 1024 /* 1025 * smb_gmtime_r 1026 * 1027 * Thread-safe version of smb_gmtime. Returns a null pointer if either 1028 * input parameter is a null pointer. Otherwise returns a pointer 1029 * to result. 1030 * 1031 * Day of the week calculation: the Epoch was a thursday. 1032 * 1033 * There are no timezone corrections so tm_isdst and tm_gmtoff are 1034 * always zero, and the zone is always WET. 
 */
struct tm *
smb_gmtime_r(time_t *clock, struct tm *result)
{
	time_t tsec;
	int year;
	int month;
	int sec_per_month;

	if (clock == 0 || result == 0)
		return (0);

	bzero(result, sizeof (struct tm));
	tsec = *clock;
	tsec -= tzh_leapcnt;

	/* Days since the Epoch modulo 7, offset by Thursday (the Epoch day). */
	result->tm_wday = tsec / SECSPERDAY;
	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;

	/* Strip off whole years, accounting for leap years. */
	year = EPOCH_YEAR;
	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
	    (SECSPERDAY * DAYSPERNYEAR))) {
		if (isleap(year))
			tsec -= SECSPERDAY * DAYSPERLYEAR;
		else
			tsec -= SECSPERDAY * DAYSPERNYEAR;

		++year;
	}

	result->tm_year = year - TM_YEAR_BASE;
	result->tm_yday = tsec / SECSPERDAY;

	/* Strip off whole months, adding a day to February in leap years. */
	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
		sec_per_month = days_in_month[month] * SECSPERDAY;

		if (month == TM_FEBRUARY && isleap(year))
			sec_per_month += SECSPERDAY;

		if (tsec < sec_per_month)
			break;

		tsec -= sec_per_month;
	}

	result->tm_mon = month;
	result->tm_mday = (tsec / SECSPERDAY) + 1;
	/* The remainder within the day gives hour/minute/second. */
	tsec %= SECSPERDAY;
	result->tm_sec = tsec % 60;
	tsec /= 60;
	result->tm_min = tsec % 60;
	tsec /= 60;
	result->tm_hour = (int)tsec;

	return (result);
}


/*
 * smb_timegm
 *
 * Converts the broken-down time in tm to a time value, i.e. the number
 * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
 * not a POSIX or ANSI function. Per the man page, the input values of
 * tm_wday and tm_yday are ignored and, as the input data is assumed to
 * represent GMT, we force tm_isdst and tm_gmtoff to 0.
 *
 * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
 * and tm_yday, and bring the other fields within normal range.
I don't 1104 * think this is really how it should be done but it's convenient for 1105 * now. 1106 */ 1107 time_t 1108 smb_timegm(struct tm *tm) 1109 { 1110 time_t tsec; 1111 int dd; 1112 int mm; 1113 int yy; 1114 int year; 1115 1116 if (tm == 0) 1117 return (-1); 1118 1119 year = tm->tm_year + TM_YEAR_BASE; 1120 tsec = tzh_leapcnt; 1121 1122 for (yy = EPOCH_YEAR; yy < year; ++yy) { 1123 if (isleap(yy)) 1124 tsec += SECSPERDAY * DAYSPERLYEAR; 1125 else 1126 tsec += SECSPERDAY * DAYSPERNYEAR; 1127 } 1128 1129 for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) { 1130 dd = days_in_month[mm] * SECSPERDAY; 1131 1132 if (mm == TM_FEBRUARY && isleap(year)) 1133 dd += SECSPERDAY; 1134 1135 tsec += dd; 1136 } 1137 1138 tsec += (tm->tm_mday - 1) * SECSPERDAY; 1139 tsec += tm->tm_sec; 1140 tsec += tm->tm_min * SECSPERMIN; 1141 tsec += tm->tm_hour * SECSPERHOUR; 1142 1143 tm->tm_isdst = 0; 1144 (void) smb_gmtime_r(&tsec, tm); 1145 return (tsec); 1146 } 1147 1148 /* 1149 * smb_pad_align 1150 * 1151 * Returns the number of bytes required to pad an offset to the 1152 * specified alignment. 1153 */ 1154 uint32_t 1155 smb_pad_align(uint32_t offset, uint32_t align) 1156 { 1157 uint32_t pad = offset % align; 1158 1159 if (pad != 0) 1160 pad = align - pad; 1161 1162 return (pad); 1163 } 1164 1165 /* 1166 * smb_panic 1167 * 1168 * Logs the file name, function name and line number passed in and panics the 1169 * system. 
1170 */ 1171 void 1172 smb_panic(char *file, const char *func, int line) 1173 { 1174 cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line); 1175 } 1176 1177 /* 1178 * Creates an AVL tree and initializes the given smb_avl_t 1179 * structure using the passed args 1180 */ 1181 void 1182 smb_avl_create(smb_avl_t *avl, size_t size, size_t offset, 1183 const smb_avl_nops_t *ops) 1184 { 1185 ASSERT(avl); 1186 ASSERT(ops); 1187 1188 rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL); 1189 mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL); 1190 1191 avl->avl_nops = ops; 1192 avl->avl_state = SMB_AVL_STATE_READY; 1193 avl->avl_refcnt = 0; 1194 (void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence, 1195 sizeof (uint32_t)); 1196 1197 avl_create(&avl->avl_tree, ops->avln_cmp, size, offset); 1198 } 1199 1200 /* 1201 * Destroys the specified AVL tree. 1202 * It waits for all the in-flight operations to finish 1203 * before destroying the AVL. 1204 */ 1205 void 1206 smb_avl_destroy(smb_avl_t *avl) 1207 { 1208 void *cookie = NULL; 1209 void *node; 1210 1211 ASSERT(avl); 1212 1213 mutex_enter(&avl->avl_mutex); 1214 if (avl->avl_state != SMB_AVL_STATE_READY) { 1215 mutex_exit(&avl->avl_mutex); 1216 return; 1217 } 1218 1219 avl->avl_state = SMB_AVL_STATE_DESTROYING; 1220 1221 while (avl->avl_refcnt > 0) 1222 (void) cv_wait(&avl->avl_cv, &avl->avl_mutex); 1223 mutex_exit(&avl->avl_mutex); 1224 1225 rw_enter(&avl->avl_lock, RW_WRITER); 1226 while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL) 1227 avl->avl_nops->avln_destroy(node); 1228 1229 avl_destroy(&avl->avl_tree); 1230 rw_exit(&avl->avl_lock); 1231 1232 rw_destroy(&avl->avl_lock); 1233 1234 mutex_destroy(&avl->avl_mutex); 1235 bzero(avl, sizeof (smb_avl_t)); 1236 } 1237 1238 /* 1239 * Adds the given item to the AVL if it's 1240 * not already there. 
1241 * 1242 * Returns: 1243 * 1244 * ENOTACTIVE AVL is not in READY state 1245 * EEXIST The item is already in AVL 1246 */ 1247 int 1248 smb_avl_add(smb_avl_t *avl, void *item) 1249 { 1250 avl_index_t where; 1251 1252 ASSERT(avl); 1253 ASSERT(item); 1254 1255 if (!smb_avl_hold(avl)) 1256 return (ENOTACTIVE); 1257 1258 rw_enter(&avl->avl_lock, RW_WRITER); 1259 if (avl_find(&avl->avl_tree, item, &where) != NULL) { 1260 rw_exit(&avl->avl_lock); 1261 smb_avl_rele(avl); 1262 return (EEXIST); 1263 } 1264 1265 avl_insert(&avl->avl_tree, item, where); 1266 avl->avl_sequence++; 1267 rw_exit(&avl->avl_lock); 1268 1269 smb_avl_rele(avl); 1270 return (0); 1271 } 1272 1273 /* 1274 * Removes the given item from the AVL. 1275 * If no reference is left on the item 1276 * it will also be destroyed by calling the 1277 * registered destroy operation. 1278 */ 1279 void 1280 smb_avl_remove(smb_avl_t *avl, void *item) 1281 { 1282 avl_index_t where; 1283 void *rm_item; 1284 1285 ASSERT(avl); 1286 ASSERT(item); 1287 1288 if (!smb_avl_hold(avl)) 1289 return; 1290 1291 rw_enter(&avl->avl_lock, RW_WRITER); 1292 if ((rm_item = avl_find(&avl->avl_tree, item, &where)) == NULL) { 1293 rw_exit(&avl->avl_lock); 1294 smb_avl_rele(avl); 1295 return; 1296 } 1297 1298 avl_remove(&avl->avl_tree, rm_item); 1299 if (avl->avl_nops->avln_rele(rm_item)) 1300 avl->avl_nops->avln_destroy(rm_item); 1301 avl->avl_sequence++; 1302 rw_exit(&avl->avl_lock); 1303 1304 smb_avl_rele(avl); 1305 } 1306 1307 /* 1308 * Looks up the AVL for the given item. 1309 * If the item is found a hold on the object 1310 * is taken before the pointer to it is 1311 * returned to the caller. The caller MUST 1312 * always call smb_avl_release() after it's done 1313 * using the returned object to release the hold 1314 * taken on the object. 
1315 */ 1316 void * 1317 smb_avl_lookup(smb_avl_t *avl, void *item) 1318 { 1319 void *node = NULL; 1320 1321 ASSERT(avl); 1322 ASSERT(item); 1323 1324 if (!smb_avl_hold(avl)) 1325 return (NULL); 1326 1327 rw_enter(&avl->avl_lock, RW_READER); 1328 node = avl_find(&avl->avl_tree, item, NULL); 1329 if (node != NULL) 1330 avl->avl_nops->avln_hold(node); 1331 rw_exit(&avl->avl_lock); 1332 1333 if (node == NULL) 1334 smb_avl_rele(avl); 1335 1336 return (node); 1337 } 1338 1339 /* 1340 * The hold on the given object is released. 1341 * This function MUST always be called after 1342 * smb_avl_lookup() and smb_avl_iterate() for 1343 * the returned object. 1344 * 1345 * If AVL is in DESTROYING state, the destroying 1346 * thread will be notified. 1347 */ 1348 void 1349 smb_avl_release(smb_avl_t *avl, void *item) 1350 { 1351 ASSERT(avl); 1352 ASSERT(item); 1353 1354 if (avl->avl_nops->avln_rele(item)) 1355 avl->avl_nops->avln_destroy(item); 1356 1357 smb_avl_rele(avl); 1358 } 1359 1360 /* 1361 * Initializes the given cursor for the AVL. 1362 * The cursor will be used to iterate through the AVL 1363 */ 1364 void 1365 smb_avl_iterinit(smb_avl_t *avl, smb_avl_cursor_t *cursor) 1366 { 1367 ASSERT(avl); 1368 ASSERT(cursor); 1369 1370 cursor->avlc_next = NULL; 1371 cursor->avlc_sequence = avl->avl_sequence; 1372 } 1373 1374 /* 1375 * Iterates through the AVL using the given cursor. 1376 * It always starts at the beginning and then returns 1377 * a pointer to the next object on each subsequent call. 1378 * 1379 * If a new object is added to or removed from the AVL 1380 * between two calls to this function, the iteration 1381 * will terminate prematurely. 1382 * 1383 * The caller MUST always call smb_avl_release() after it's 1384 * done using the returned object to release the hold taken 1385 * on the object. 
 */
void *
smb_avl_iterate(smb_avl_t *avl, smb_avl_cursor_t *cursor)
{
	void *node;

	ASSERT(avl);
	ASSERT(cursor);

	if (!smb_avl_hold(avl))
		return (NULL);

	rw_enter(&avl->avl_lock, RW_READER);
	/*
	 * The sequence number changes on every add/remove; a mismatch
	 * means the tree changed since smb_avl_iterinit() (or the last
	 * iterate call) and the iteration terminates early.
	 */
	if (cursor->avlc_sequence != avl->avl_sequence) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return (NULL);
	}

	if (cursor->avlc_next == NULL)
		node = avl_first(&avl->avl_tree);
	else
		node = AVL_NEXT(&avl->avl_tree, cursor->avlc_next);

	/* Hold the node before dropping avl_lock so it can't go away. */
	if (node != NULL)
		avl->avl_nops->avln_hold(node);

	cursor->avlc_next = node;
	rw_exit(&avl->avl_lock);

	/* End of iteration: release the AVL hold taken above. */
	if (node == NULL)
		smb_avl_rele(avl);

	return (node);
}

/*
 * Increments the AVL reference count in order to
 * prevent the avl from being destroyed while it's
 * being accessed.
 *
 * Returns B_FALSE (without taking a hold) if the AVL
 * is not in READY state, e.g. while it is being destroyed.
 */
static boolean_t
smb_avl_hold(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	if (avl->avl_state != SMB_AVL_STATE_READY) {
		mutex_exit(&avl->avl_mutex);
		return (B_FALSE);
	}
	avl->avl_refcnt++;
	mutex_exit(&avl->avl_mutex);

	return (B_TRUE);
}

/*
 * Decrements the AVL reference count to release the
 * hold. If another thread is trying to destroy the
 * AVL and is waiting for the reference count to become
 * 0, it is signaled to wake up.
 */
static void
smb_avl_rele(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	ASSERT(avl->avl_refcnt > 0);
	avl->avl_refcnt--;
	/* Wake the thread blocked in smb_avl_destroy(), if any. */
	if (avl->avl_state == SMB_AVL_STATE_DESTROYING)
		cv_broadcast(&avl->avl_cv);
	mutex_exit(&avl->avl_mutex);
}

/*
 * smb_latency_init
 *
 * Zeroes the latency structure and initializes its lock.
 * A spin mutex at SPL7 is used -- presumably because samples
 * may be added from high-level interrupt context; confirm
 * against callers before changing the mutex type.
 */
void
smb_latency_init(smb_latency_t *lat)
{
	bzero(lat, sizeof (*lat));
	mutex_init(&lat->ly_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
}

/*
 * smb_latency_destroy
 */
void
smb_latency_destroy(smb_latency_t *lat)
{
	mutex_destroy(&lat->ly_mutex);
}

/*
 * smb_latency_add_sample
 *
 * Uses the new sample to calculate the new mean and standard deviation. The
 * sample must be a scaled value.
 *
 * NOTE(review): ly_*_stddev is ASSIGNED (not accumulated) each call, so it
 * holds only the latest deviation product rather than a running sum of
 * squared deviations; a Welford-style accumulator would use "+=". Kept
 * as-is since consumers of this field may expect this behavior -- confirm.
 */
void
smb_latency_add_sample(smb_latency_t *lat, hrtime_t sample)
{
	hrtime_t	a_mean;
	hrtime_t	d_mean;

	mutex_enter(&lat->ly_mutex);
	/* "a_" counters are aggregate; guard is for wraparound to 0. */
	lat->ly_a_nreq++;
	lat->ly_a_sum += sample;
	if (lat->ly_a_nreq != 0) {
		a_mean = lat->ly_a_sum / lat->ly_a_nreq;
		lat->ly_a_stddev =
		    (sample - a_mean) * (sample - lat->ly_a_mean);
		lat->ly_a_mean = a_mean;
	}
	/* "d_" counters are the same computation over a separate window. */
	lat->ly_d_nreq++;
	lat->ly_d_sum += sample;
	if (lat->ly_d_nreq != 0) {
		d_mean = lat->ly_d_sum / lat->ly_d_nreq;
		lat->ly_d_stddev =
		    (sample - d_mean) * (sample - lat->ly_d_mean);
		lat->ly_d_mean = d_mean;
	}
	mutex_exit(&lat->ly_mutex);
}

/*
 * smb_srqueue_init
 *
 * Initializes wait-queue/run-queue accounting (kstat I/O style).
 */
void
smb_srqueue_init(smb_srqueue_t *srq)
{
	bzero(srq, sizeof (*srq));
	mutex_init(&srq->srq_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
	srq->srq_wlastupdate = srq->srq_rlastupdate = gethrtime_unscaled();
}

/*
 * smb_srqueue_destroy
 */
void
smb_srqueue_destroy(smb_srqueue_t *srq)
{
	mutex_destroy(&srq->srq_mutex);
}

/*
 * smb_srqueue_waitq_enter
 *
 * A request enters the wait queue; accumulate time-weighted queue
 * length (wlentime) and busy time (wtime) for the elapsed interval.
 */
void
smb_srqueue_waitq_enter(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	/* Post-increment: wcnt is the queue length BEFORE this arrival. */
	wcnt = srq->srq_wcnt++;
	if (wcnt != 0) {
		srq->srq_wlentime += delta * wcnt;
		srq->srq_wtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}

/*
 * smb_srqueue_runq_exit
 *
 * A request leaves the run queue.
 */
void
smb_srqueue_runq_exit(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	/* Post-decrement: rcnt is the queue length BEFORE this exit. */
	rcnt = srq->srq_rcnt--;
	ASSERT(rcnt > 0);
	srq->srq_rlentime += delta * rcnt;
	srq->srq_rtime += delta;
	mutex_exit(&srq->srq_mutex);
}

/*
 * smb_srqueue_waitq_to_runq
 *
 * A request moves from the wait queue to the run queue; both sets of
 * counters are updated under a single mutex acquisition.
 */
void
smb_srqueue_waitq_to_runq(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	wcnt = srq->srq_wcnt--;
	ASSERT(wcnt > 0);
	srq->srq_wlentime += delta * wcnt;
	srq->srq_wtime += delta;
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	rcnt = srq->srq_rcnt++;
	if (rcnt != 0) {
		srq->srq_rlentime += delta * rcnt;
		srq->srq_rtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}

/*
 * smb_srqueue_update
 *
 * Takes a snapshot of the smb_sr_stat_t structure passed in.
1605 */ 1606 void 1607 smb_srqueue_update(smb_srqueue_t *srq, smb_kstat_utilization_t *kd) 1608 { 1609 hrtime_t delta; 1610 hrtime_t snaptime; 1611 1612 mutex_enter(&srq->srq_mutex); 1613 snaptime = gethrtime_unscaled(); 1614 delta = snaptime - srq->srq_wlastupdate; 1615 srq->srq_wlastupdate = snaptime; 1616 if (srq->srq_wcnt != 0) { 1617 srq->srq_wlentime += delta * srq->srq_wcnt; 1618 srq->srq_wtime += delta; 1619 } 1620 delta = snaptime - srq->srq_rlastupdate; 1621 srq->srq_rlastupdate = snaptime; 1622 if (srq->srq_rcnt != 0) { 1623 srq->srq_rlentime += delta * srq->srq_rcnt; 1624 srq->srq_rtime += delta; 1625 } 1626 kd->ku_rlentime = srq->srq_rlentime; 1627 kd->ku_rtime = srq->srq_rtime; 1628 kd->ku_wlentime = srq->srq_wlentime; 1629 kd->ku_wtime = srq->srq_wtime; 1630 mutex_exit(&srq->srq_mutex); 1631 scalehrtime(&kd->ku_rlentime); 1632 scalehrtime(&kd->ku_rtime); 1633 scalehrtime(&kd->ku_wlentime); 1634 scalehrtime(&kd->ku_wtime); 1635 } 1636 1637 void 1638 smb_threshold_init(smb_cmd_threshold_t *ct, char *cmd, 1639 uint_t threshold, uint_t timeout) 1640 { 1641 bzero(ct, sizeof (smb_cmd_threshold_t)); 1642 mutex_init(&ct->ct_mutex, NULL, MUTEX_DEFAULT, NULL); 1643 cv_init(&ct->ct_cond, NULL, CV_DEFAULT, NULL); 1644 1645 ct->ct_cmd = cmd; 1646 ct->ct_threshold = threshold; 1647 ct->ct_timeout = timeout; 1648 } 1649 1650 void 1651 smb_threshold_fini(smb_cmd_threshold_t *ct) 1652 { 1653 cv_destroy(&ct->ct_cond); 1654 mutex_destroy(&ct->ct_mutex); 1655 } 1656 1657 /* 1658 * This threshold mechanism is used to limit the number of simultaneous 1659 * named pipe connections, concurrent authentication conversations, etc. 1660 * Requests that would take us over the threshold wait until either the 1661 * resources are available (return zero) or timeout (return error). 
 */
int
smb_threshold_enter(smb_cmd_threshold_t *ct)
{
	clock_t time, rem;

	/* Absolute deadline (lbolt ticks) for cv_timedwait() below. */
	time = MSEC_TO_TICK(ct->ct_timeout) + ddi_get_lbolt();
	mutex_enter(&ct->ct_mutex);

	/* ct_threshold == 0 means smb_threshold_wake_all() disabled us. */
	while (ct->ct_threshold != 0 &&
	    ct->ct_threshold <= ct->ct_active_cnt) {
		ct->ct_blocked_cnt++;
		rem = cv_timedwait(&ct->ct_cond, &ct->ct_mutex, time);
		ct->ct_blocked_cnt--;
		/* rem < 0: deadline expired without getting a slot. */
		if (rem < 0) {
			mutex_exit(&ct->ct_mutex);
			return (ETIME);
		}
	}
	if (ct->ct_threshold == 0) {
		mutex_exit(&ct->ct_mutex);
		return (ECANCELED);
	}

	ASSERT3U(ct->ct_active_cnt, <, ct->ct_threshold);
	ct->ct_active_cnt++;

	mutex_exit(&ct->ct_mutex);
	return (0);
}

/*
 * Releases the slot taken by a successful smb_threshold_enter()
 * and wakes one waiter, if any.
 */
void
smb_threshold_exit(smb_cmd_threshold_t *ct)
{
	mutex_enter(&ct->ct_mutex);
	ASSERT3U(ct->ct_active_cnt, >, 0);
	ct->ct_active_cnt--;
	if (ct->ct_blocked_cnt)
		cv_signal(&ct->ct_cond);
	mutex_exit(&ct->ct_mutex);
}

/*
 * Disables the threshold (sets it to 0, which smb_threshold_enter()
 * treats as "canceled") and wakes all blocked waiters, e.g. during
 * shutdown.
 */
void
smb_threshold_wake_all(smb_cmd_threshold_t *ct)
{
	mutex_enter(&ct->ct_mutex);
	ct->ct_threshold = 0;
	cv_broadcast(&ct->ct_cond);
	mutex_exit(&ct->ct_mutex);
}