/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/tzfile.h>
#include <sys/atomic.h>
#include <sys/kidmap.h>
#include <sys/time.h>
#include <sys/spl.h>
#include <sys/cpuvar.h>
#include <sys/random.h>
#include <smbsrv/smb_kproto.h>
#include <smbsrv/smb_fsops.h>
#include <smbsrv/smbinfo.h>
#include <smbsrv/smb_xdr.h>
#include <smbsrv/smb_vops.h>
#include <smbsrv/smb_idmap.h>

#include <sys/sid.h>
#include <sys/priv_names.h>

static kmem_cache_t	*smb_dtor_cache;
static boolean_t	smb_llist_initialized = B_FALSE;

static boolean_t smb_thread_continue_timedwait_locked(smb_thread_t *, int);

static boolean_t smb_avl_hold(smb_avl_t *);
static void smb_avl_rele(smb_avl_t *);

time_t tzh_leapcnt = 0;

struct tm
*smb_gmtime_r(time_t *clock, struct tm *result);

time_t
smb_timegm(struct tm *tm);

struct tm {
	int	tm_sec;
	int	tm_min;
	int	tm_hour;
	int	tm_mday;
	int	tm_mon;
	int	tm_year;
	int	tm_wday;
	int	tm_yday;
	int	tm_isdst;
};

static int days_in_month[] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

int
smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (smb_wcequiv_strlen(str));
	return (strlen(str));
}

int
smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (smb_wcequiv_strlen(str) + 2);
	return (strlen(str) + 1);
}

int
smb_ascii_or_unicode_null_len(struct smb_request *sr)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (2);
	return (1);
}

/*
 * Convert old-style (DOS, LanMan) wildcard strings to NT style.
 * This should ONLY happen to patterns that come from old clients,
 * meaning dialect LANMAN2_1 etc. (dialect < NT_LM_0_12).
 *
 *	? is converted to >
 *	* is converted to < if it is followed by .
 *	. is converted to " if it is followed by ? or * or end of pattern
 *
 * Note: modifies pattern in place.
 */
void
smb_convert_wildcards(char *pattern)
{
	char	*p;

	for (p = pattern; *p != '\0'; p++) {
		switch (*p) {
		case '?':
			*p = '>';
			break;
		case '*':
			if (p[1] == '.')
				*p = '<';
			break;
		case '.':
			if (p[1] == '?' || p[1] == '*' || p[1] == '\0')
				*p = '\"';
			break;
		}
	}
}
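
/*
 * Worked examples of the conversion above (illustrative only; the
 * patterns are hypothetical):
 *
 *	"???.txt"  becomes  ">>>.txt"	(each '?' becomes '>')
 *	"*.c"      becomes  "<.c"	('*' is followed by '.')
 *	"abc*"     stays    "abc*"	('*' not followed by '.')
 *	"name."    becomes  "name\""	('.' at end of pattern becomes '"')
 */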

/*
 * smb_sattr_check
 *
 * Check file attributes against a search attribute (sattr) mask.
 *
 * Normal files, which includes READONLY and ARCHIVE, always pass
 * this check.  If the DIRECTORY, HIDDEN or SYSTEM special attributes
 * are set then they must appear in the search mask.  The special
 * attributes are inclusive, i.e. all special attributes that appear
 * in the file attributes must also appear in sattr for the check to
 * pass.
 *
 * The following examples show how this works:
 *
 *	fileA:	READONLY
 *	fileB:	0 (no attributes = normal file)
 *	fileC:	READONLY, ARCHIVE
 *	fileD:	HIDDEN
 *	fileE:	READONLY, HIDDEN, SYSTEM
 *	dirA:	DIRECTORY
 *
 * search attribute: 0
 *	Returns: fileA, fileB and fileC.
 * search attribute: HIDDEN
 *	Returns: fileA, fileB, fileC and fileD.
 * search attribute: SYSTEM
 *	Returns: fileA, fileB and fileC.
 * search attribute: DIRECTORY
 *	Returns: fileA, fileB, fileC and dirA.
 * search attribute: HIDDEN and SYSTEM
 *	Returns: fileA, fileB, fileC, fileD and fileE.
 *
 * Returns true if the file and sattr match; otherwise, returns false.
 */
boolean_t
smb_sattr_check(uint16_t dosattr, uint16_t sattr)
{
	if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
	    !(sattr & FILE_ATTRIBUTE_DIRECTORY))
		return (B_FALSE);

	if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
	    !(sattr & FILE_ATTRIBUTE_HIDDEN))
		return (B_FALSE);

	if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
	    !(sattr & FILE_ATTRIBUTE_SYSTEM))
		return (B_FALSE);

	return (B_TRUE);
}

int
microtime(timestruc_t *tvp)
{
	tvp->tv_sec = gethrestime_sec();
	tvp->tv_nsec = 0;
	return (0);
}

int32_t
clock_get_milli_uptime(void)
{
	return (TICK_TO_MSEC(ddi_get_lbolt()));
}

/*
 * smb_idpool_increment
 *
 * This function grows the ID pool by doubling its current size. It
 * assumes the caller has entered the mutex of the pool.
 */
static int
smb_idpool_increment(
    smb_idpool_t	*pool)
{
	uint8_t		*new_pool;
	uint32_t	new_size;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	new_size = pool->id_size * 2;
	if (new_size <= SMB_IDPOOL_MAX_SIZE) {
		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
		if (new_pool) {
			bzero(new_pool, new_size / 8);
			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
			kmem_free(pool->id_pool, pool->id_size / 8);
			pool->id_pool = new_pool;
			pool->id_free_counter += new_size - pool->id_size;
			pool->id_max_free_counter += new_size - pool->id_size;
			pool->id_size = new_size;
			pool->id_idx_msk = (new_size / 8) - 1;
			if (new_size >= SMB_IDPOOL_MAX_SIZE) {
				/* id -1 made unavailable */
				pool->id_pool[pool->id_idx_msk] = 0x80;
				pool->id_free_counter--;
				pool->id_max_free_counter--;
			}
			return (0);
		}
	}
	return (-1);
}

/*
 * smb_idpool_constructor
 *
 * This function initializes the pool structure provided.
 */
int
smb_idpool_constructor(
    smb_idpool_t	*pool)
{

	ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);

	pool->id_size = SMB_IDPOOL_MIN_SIZE;
	pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
	pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_bit = 0x02;
	pool->id_bit_idx = 1;
	pool->id_idx = 0;
	pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
	    KM_SLEEP);
	bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
	pool->id_pool[0] = 0x01;	/* id 0 made unavailable */
	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
	pool->id_magic = SMB_IDPOOL_MAGIC;
	return (0);
}

/*
 * smb_idpool_destructor
 *
 * This function tears down and frees the resources associated with the
 * pool provided.
 */
void
smb_idpool_destructor(
    smb_idpool_t	*pool)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
	pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
	mutex_destroy(&pool->id_mutex);
	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
}

/*
 * smb_idpool_alloc
 *
 * This function allocates an ID from the pool provided.
 */
int
smb_idpool_alloc(
    smb_idpool_t	*pool,
    uint16_t		*id)
{
	uint32_t	i;
	uint8_t		bit;
	uint8_t		bit_idx;
	uint8_t		byte;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	mutex_enter(&pool->id_mutex);
	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
		mutex_exit(&pool->id_mutex);
		return (-1);
	}

	i = pool->id_size;
	while (i) {
		bit = pool->id_bit;
		bit_idx = pool->id_bit_idx;
		byte = pool->id_pool[pool->id_idx];
		while (bit) {
			if (byte & bit) {
				bit = bit << 1;
				bit_idx++;
				continue;
			}
			pool->id_pool[pool->id_idx] |= bit;
			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
			pool->id_free_counter--;
			pool->id_bit = bit;
			pool->id_bit_idx = bit_idx;
			mutex_exit(&pool->id_mutex);
			return (0);
		}
		pool->id_bit = 1;
		pool->id_bit_idx = 0;
		pool->id_idx++;
		pool->id_idx &= pool->id_idx_msk;
		--i;
	}
	/*
	 * This section of code shouldn't be reached. If there are IDs
	 * available and none could be found there's a problem.
	 */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
	return (-1);
}

/*
 * smb_idpool_free
 *
 * This function frees the ID provided.
 */
void
smb_idpool_free(
    smb_idpool_t	*pool,
    uint16_t		id)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(id != 0);
	ASSERT(id != 0xFFFF);

	mutex_enter(&pool->id_mutex);
	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
		pool->id_free_counter++;
		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
		mutex_exit(&pool->id_mutex);
		return;
	}
	/* Freeing a free ID. */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
}
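
/*
 * Example (illustrative sketch only; the variables are hypothetical):
 * typical life cycle of an ID pool. IDs 0 and 0xFFFF are never handed out.
 *
 *	smb_idpool_t	pool;
 *	uint16_t	id;
 *
 *	(void) smb_idpool_constructor(&pool);
 *	if (smb_idpool_alloc(&pool, &id) == 0) {
 *		... use id, unique until freed ...
 *		smb_idpool_free(&pool, id);
 *	}
 *	smb_idpool_destructor(&pool);
 */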

/*
 * Initialize the llist delete queue object cache.
 */
void
smb_llist_init(void)
{
	if (smb_llist_initialized)
		return;

	smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
	    sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);

	smb_llist_initialized = B_TRUE;
}

/*
 * Destroy the llist delete queue object cache.
 */
void
smb_llist_fini(void)
{
	if (!smb_llist_initialized)
		return;

	kmem_cache_destroy(smb_dtor_cache);
	smb_llist_initialized = B_FALSE;
}

/*
 * smb_llist_constructor
 *
 * This function initializes a locked list.
 */
void
smb_llist_constructor(
    smb_llist_t	*ll,
    size_t	size,
    size_t	offset)
{
	rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
	list_create(&ll->ll_list, size, offset);
	list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
	    offsetof(smb_dtor_t, dt_lnd));
	ll->ll_count = 0;
	ll->ll_wrop = 0;
	ll->ll_deleteq_count = 0;
	ll->ll_flushing = B_FALSE;
}

/*
 * Flush the delete queue and destroy a locked list.
 */
void
smb_llist_destructor(
    smb_llist_t	*ll)
{
	smb_llist_flush(ll);

	ASSERT(ll->ll_count == 0);
	ASSERT(ll->ll_deleteq_count == 0);

	rw_destroy(&ll->ll_lock);
	list_destroy(&ll->ll_list);
	list_destroy(&ll->ll_deleteq);
	mutex_destroy(&ll->ll_mutex);
}

/*
 * Post an object to the delete queue. The delete queue will be processed
 * during list exit or list destruction. Objects are often posted for
 * deletion during list iteration (while the list is locked) but that is
 * not required, and an object can be posted at any time.
 */
void
smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
{
	smb_dtor_t	*dtor;

	ASSERT((object != NULL) && (dtorproc != NULL));

	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
	bzero(dtor, sizeof (smb_dtor_t));
	dtor->dt_magic = SMB_DTOR_MAGIC;
	dtor->dt_object = object;
	dtor->dt_proc = dtorproc;

	mutex_enter(&ll->ll_mutex);
	list_insert_tail(&ll->ll_deleteq, dtor);
	++ll->ll_deleteq_count;
	mutex_exit(&ll->ll_mutex);
}

/*
 * Exit the list lock and process the delete queue.
 */
void
smb_llist_exit(smb_llist_t *ll)
{
	rw_exit(&ll->ll_lock);
	smb_llist_flush(ll);
}

/*
 * Flush the list delete queue. The mutex is dropped across the destructor
 * call in case this leads to additional objects being posted to the delete
 * queue.
 */
void
smb_llist_flush(smb_llist_t *ll)
{
	smb_dtor_t	*dtor;

	mutex_enter(&ll->ll_mutex);
	if (ll->ll_flushing) {
		mutex_exit(&ll->ll_mutex);
		return;
	}
	ll->ll_flushing = B_TRUE;

	dtor = list_head(&ll->ll_deleteq);
	while (dtor != NULL) {
		SMB_DTOR_VALID(dtor);
		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
		list_remove(&ll->ll_deleteq, dtor);
		--ll->ll_deleteq_count;
		mutex_exit(&ll->ll_mutex);

		dtor->dt_proc(dtor->dt_object);

		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
		kmem_cache_free(smb_dtor_cache, dtor);
		mutex_enter(&ll->ll_mutex);
		dtor = list_head(&ll->ll_deleteq);
	}
	ll->ll_flushing = B_FALSE;

	mutex_exit(&ll->ll_mutex);
}
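
/*
 * Example (illustrative sketch, assuming the usual smb_llist_enter/
 * smb_llist_head/smb_llist_next accessors from smb_ktypes.h; "expired"
 * and "node_dtor" are hypothetical): posting objects for deferred
 * deletion while iterating. The destructors run when the lock is exited.
 *
 *	smb_llist_enter(&list, RW_READER);
 *	node = smb_llist_head(&list);
 *	while (node != NULL) {
 *		next = smb_llist_next(&list, node);
 *		if (expired(node))
 *			smb_llist_post(&list, node, node_dtor);
 *		node = next;
 *	}
 *	smb_llist_exit(&list);		flushes the delete queue
 */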

/*
 * smb_llist_upgrade
 *
 * This function tries to upgrade the lock of the locked list. It assumes
 * the lock has already been entered in RW_READER mode. It first tries the
 * Solaris function rw_tryupgrade(). If that call fails the lock is released
 * and reentered in RW_WRITER mode. In that last case a window is opened
 * during which the contents of the list may have changed. The return code
 * indicates whether or not the list was modified while the lock was
 * dropped.
 */
int
smb_llist_upgrade(
    smb_llist_t	*ll)
{
	uint64_t	wrop;

	if (rw_tryupgrade(&ll->ll_lock) != 0) {
		return (0);
	}
	wrop = ll->ll_wrop;
	rw_exit(&ll->ll_lock);
	rw_enter(&ll->ll_lock, RW_WRITER);
	return (wrop != ll->ll_wrop);
}

/*
 * smb_llist_insert_head
 *
 * This function inserts the object passed at the beginning of the list.
 * This function assumes the lock of the list has already been entered.
 */
void
smb_llist_insert_head(
    smb_llist_t	*ll,
    void	*obj)
{
	list_insert_head(&ll->ll_list, obj);
	++ll->ll_wrop;
	++ll->ll_count;
}

/*
 * smb_llist_insert_tail
 *
 * This function appends the object passed to the list. This function
 * assumes the lock of the list has already been entered.
 */
void
smb_llist_insert_tail(
    smb_llist_t	*ll,
    void	*obj)
{
	list_insert_tail(&ll->ll_list, obj);
	++ll->ll_wrop;
	++ll->ll_count;
}

/*
 * smb_llist_remove
 *
 * This function removes the object passed from the list. This function
 * assumes the lock of the list has already been entered.
 */
void
smb_llist_remove(
    smb_llist_t	*ll,
    void	*obj)
{
	list_remove(&ll->ll_list, obj);
	++ll->ll_wrop;
	--ll->ll_count;
}

/*
 * smb_llist_get_count
 *
 * This function returns the number of elements in the specified list.
 */
uint32_t
smb_llist_get_count(
    smb_llist_t	*ll)
{
	return (ll->ll_count);
}
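
/*
 * Example (illustrative; "find the insertion point" stands in for real
 * work): acting on the return value of smb_llist_upgrade(). A nonzero
 * return means the lock was dropped and the list changed in the window,
 * so any cached position is stale.
 *
 *	smb_llist_enter(&list, RW_READER);
 *	... find the insertion point ...
 *	if (smb_llist_upgrade(&list) != 0) {
 *		... the list changed; search again before modifying ...
 *	}
 *	smb_llist_insert_tail(&list, obj);
 *	smb_llist_exit(&list);
 */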

/*
 * smb_slist_constructor
 *
 * Synchronized list constructor.
 */
void
smb_slist_constructor(
    smb_slist_t	*sl,
    size_t	size,
    size_t	offset)
{
	mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
	list_create(&sl->sl_list, size, offset);
	sl->sl_count = 0;
	sl->sl_waiting = B_FALSE;
}

/*
 * smb_slist_destructor
 *
 * Synchronized list destructor.
 */
void
smb_slist_destructor(
    smb_slist_t	*sl)
{
	VERIFY(sl->sl_count == 0);

	mutex_destroy(&sl->sl_mutex);
	cv_destroy(&sl->sl_cv);
	list_destroy(&sl->sl_list);
}

/*
 * smb_slist_insert_head
 *
 * This function inserts the object passed at the beginning of the list.
 */
void
smb_slist_insert_head(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_insert_head(&sl->sl_list, obj);
	++sl->sl_count;
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_insert_tail
 *
 * This function appends the object passed to the list.
 */
void
smb_slist_insert_tail(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_insert_tail(&sl->sl_list, obj);
	++sl->sl_count;
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_remove
 *
 * This function removes the object passed by the caller from the list.
 */
void
smb_slist_remove(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_remove(&sl->sl_list, obj);
	if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_move_tail
 *
 * This function transfers all the contents of the synchronized list to
 * the list_t provided. It returns the number of objects transferred.
 */
uint32_t
smb_slist_move_tail(
    list_t	*lst,
    smb_slist_t	*sl)
{
	uint32_t	rv;

	mutex_enter(&sl->sl_mutex);
	rv = sl->sl_count;
	if (sl->sl_count) {
		list_move_tail(lst, &sl->sl_list);
		sl->sl_count = 0;
		if (sl->sl_waiting) {
			sl->sl_waiting = B_FALSE;
			cv_broadcast(&sl->sl_cv);
		}
	}
	mutex_exit(&sl->sl_mutex);
	return (rv);
}

/*
 * smb_slist_obj_move
 *
 * This function moves an object from one list to the end of the other
 * list. It assumes the mutex of each list has been entered.
 */
void
smb_slist_obj_move(
    smb_slist_t	*dst,
    smb_slist_t	*src,
    void	*obj)
{
	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);

	list_remove(&src->sl_list, obj);
	list_insert_tail(&dst->sl_list, obj);
	dst->sl_count++;
	src->sl_count--;
	if ((src->sl_count == 0) && (src->sl_waiting)) {
		src->sl_waiting = B_FALSE;
		cv_broadcast(&src->sl_cv);
	}
}

/*
 * smb_slist_wait_for_empty
 *
 * This function waits for a list to be emptied.
 */
void
smb_slist_wait_for_empty(
    smb_slist_t	*sl)
{
	mutex_enter(&sl->sl_mutex);
	while (sl->sl_count) {
		sl->sl_waiting = B_TRUE;
		cv_wait(&sl->sl_cv, &sl->sl_mutex);
	}
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_exit
 *
 * This function exits the mutex of the list and signals the condition
 * variable if the list is empty.
 */
void
smb_slist_exit(smb_slist_t *sl)
{
	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}
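
/*
 * Example (illustrative sketch; "sl", "local" and the processing step are
 * hypothetical): a producer/consumer hand-off built on a synchronized
 * list. The consumer drains the list in one operation; a shutdown path
 * can block until the list is empty.
 *
 *	producer:
 *		smb_slist_insert_tail(&sl, obj);
 *
 *	consumer:
 *		list_t local;
 *		list_create(&local, size, offset);
 *		if (smb_slist_move_tail(&local, &sl) != 0) {
 *			... process and remove every object on local ...
 *		}
 *
 *	shutdown:
 *		smb_slist_wait_for_empty(&sl);
 */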

/*
 * smb_thread_entry_point
 *
 * Common entry point for all the threads created through smb_thread_start.
 * The state of the thread is set to "running" at the beginning and moved
 * to "exiting" just before calling thread_exit(). The condition variable
 * is also signaled.
 */
static void
smb_thread_entry_point(
    smb_thread_t	*thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
	mutex_enter(&thread->sth_mtx);
	ASSERT(thread->sth_state == SMB_THREAD_STATE_STARTING);
	thread->sth_th = curthread;
	thread->sth_did = thread->sth_th->t_did;

	if (!thread->sth_kill) {
		thread->sth_state = SMB_THREAD_STATE_RUNNING;
		cv_signal(&thread->sth_cv);
		mutex_exit(&thread->sth_mtx);
		thread->sth_ep(thread, thread->sth_ep_arg);
		mutex_enter(&thread->sth_mtx);
	}
	thread->sth_th = NULL;
	thread->sth_state = SMB_THREAD_STATE_EXITING;
	cv_broadcast(&thread->sth_cv);
	mutex_exit(&thread->sth_mtx);
	thread_exit();
}

/*
 * smb_thread_init
 */
void
smb_thread_init(
    smb_thread_t	*thread,
    char		*name,
    smb_thread_ep_t	ep,
    void		*ep_arg,
    pri_t		pri)
{
	ASSERT(thread->sth_magic != SMB_THREAD_MAGIC);

	bzero(thread, sizeof (*thread));

	(void) strlcpy(thread->sth_name, name, sizeof (thread->sth_name));
	thread->sth_ep = ep;
	thread->sth_ep_arg = ep_arg;
	thread->sth_state = SMB_THREAD_STATE_EXITED;
	thread->sth_pri = pri;
	mutex_init(&thread->sth_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&thread->sth_cv, NULL, CV_DEFAULT, NULL);
	thread->sth_magic = SMB_THREAD_MAGIC;
}

/*
 * smb_thread_destroy
 */
void
smb_thread_destroy(
    smb_thread_t	*thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
	ASSERT(thread->sth_state == SMB_THREAD_STATE_EXITED);
	thread->sth_magic = 0;
	mutex_destroy(&thread->sth_mtx);
	cv_destroy(&thread->sth_cv);
}

/*
 * smb_thread_start
 *
 * This function starts a thread with the parameters provided. It waits
 * until the state of the thread has been moved to running.
 */
/*ARGSUSED*/
int
smb_thread_start(
    smb_thread_t	*thread)
{
	int		rc = 0;
	kthread_t	*tmpthread;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	switch (thread->sth_state) {
	case SMB_THREAD_STATE_EXITED:
		thread->sth_state = SMB_THREAD_STATE_STARTING;
		mutex_exit(&thread->sth_mtx);
		tmpthread = thread_create(NULL, 0, smb_thread_entry_point,
		    thread, 0, &p0, TS_RUN, thread->sth_pri);
		ASSERT(tmpthread != NULL);
		mutex_enter(&thread->sth_mtx);
		while (thread->sth_state == SMB_THREAD_STATE_STARTING)
			cv_wait(&thread->sth_cv, &thread->sth_mtx);
		if (thread->sth_state != SMB_THREAD_STATE_RUNNING)
			rc = -1;
		break;
	default:
		ASSERT(0);
		rc = -1;
		break;
	}
	mutex_exit(&thread->sth_mtx);
	return (rc);
}

/*
 * smb_thread_stop
 *
 * This function signals a thread to kill itself and waits until the
 * "exiting" state has been reached.
 */
void
smb_thread_stop(smb_thread_t *thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	switch (thread->sth_state) {
	case SMB_THREAD_STATE_RUNNING:
	case SMB_THREAD_STATE_STARTING:
		if (!thread->sth_kill) {
			thread->sth_kill = B_TRUE;
			cv_broadcast(&thread->sth_cv);
			while (thread->sth_state != SMB_THREAD_STATE_EXITING)
				cv_wait(&thread->sth_cv, &thread->sth_mtx);
			mutex_exit(&thread->sth_mtx);
			thread_join(thread->sth_did);
			mutex_enter(&thread->sth_mtx);
			thread->sth_state = SMB_THREAD_STATE_EXITED;
			thread->sth_did = 0;
			thread->sth_kill = B_FALSE;
			cv_broadcast(&thread->sth_cv);
			break;
		}
		/*FALLTHRU*/

	case SMB_THREAD_STATE_EXITING:
		if (thread->sth_kill) {
			while (thread->sth_state != SMB_THREAD_STATE_EXITED)
				cv_wait(&thread->sth_cv, &thread->sth_mtx);
		} else {
			thread->sth_state = SMB_THREAD_STATE_EXITED;
			thread->sth_did = 0;
		}
		break;

	case SMB_THREAD_STATE_EXITED:
		break;

	default:
		ASSERT(0);
		break;
	}
	mutex_exit(&thread->sth_mtx);
}

/*
 * smb_thread_signal
 *
 * This function signals a thread.
 */
void
smb_thread_signal(smb_thread_t *thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	switch (thread->sth_state) {
	case SMB_THREAD_STATE_RUNNING:
		cv_signal(&thread->sth_cv);
		break;

	default:
		break;
	}
	mutex_exit(&thread->sth_mtx);
}

boolean_t
smb_thread_continue(smb_thread_t *thread)
{
	boolean_t result;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	result = smb_thread_continue_timedwait_locked(thread, 0);
	mutex_exit(&thread->sth_mtx);

	return (result);
}

boolean_t
smb_thread_continue_nowait(smb_thread_t *thread)
{
	boolean_t result;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	/*
	 * Setting ticks=-1 requests a non-blocking check. We will
	 * still block if the thread is in "suspend" state.
	 */
	result = smb_thread_continue_timedwait_locked(thread, -1);
	mutex_exit(&thread->sth_mtx);

	return (result);
}

boolean_t
smb_thread_continue_timedwait(smb_thread_t *thread, int seconds)
{
	boolean_t result;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	result = smb_thread_continue_timedwait_locked(thread,
	    SEC_TO_TICK(seconds));
	mutex_exit(&thread->sth_mtx);

	return (result);
}

/*
 * smb_thread_continue_timedwait_locked
 *
 * Internal only.  ticks == -1 means don't block; ticks == 0 means wait
 * indefinitely.
 */
static boolean_t
smb_thread_continue_timedwait_locked(smb_thread_t *thread, int ticks)
{
	boolean_t result;

	/* -1 means don't block */
	if (ticks != -1 && !thread->sth_kill) {
		if (ticks == 0) {
			cv_wait(&thread->sth_cv, &thread->sth_mtx);
		} else {
			(void) cv_reltimedwait(&thread->sth_cv,
			    &thread->sth_mtx, (clock_t)ticks, TR_CLOCK_TICK);
		}
	}
	result = (thread->sth_kill == 0);

	return (result);
}
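
/*
 * Example (illustrative sketch; my_worker, my_arg, t, arg and pri are
 * hypothetical): the typical shape of an smb_thread entry point. The
 * loop body runs each time the thread is signaled, and the loop ends
 * when smb_thread_stop() sets the kill flag.
 *
 *	static void
 *	my_worker(smb_thread_t *thread, void *my_arg)
 *	{
 *		while (smb_thread_continue(thread)) {
 *			... do one unit of work ...
 *		}
 *	}
 *
 *	smb_thread_init(&t, "my_worker", my_worker, arg, pri);
 *	(void) smb_thread_start(&t);
 *	...
 *	smb_thread_signal(&t);		wake the worker
 *	...
 *	smb_thread_stop(&t);
 *	smb_thread_destroy(&t);
 */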

/*
 * smb_rwx_init
 */
void
smb_rwx_init(
    smb_rwx_t	*rwx)
{
	bzero(rwx, sizeof (smb_rwx_t));
	cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
}

/*
 * smb_rwx_destroy
 */
void
smb_rwx_destroy(
    smb_rwx_t	*rwx)
{
	mutex_destroy(&rwx->rwx_mutex);
	cv_destroy(&rwx->rwx_cv);
	rw_destroy(&rwx->rwx_lock);
}

/*
 * smb_rwx_rwexit
 */
void
smb_rwx_rwexit(
    smb_rwx_t	*rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mutex_enter(&rwx->rwx_mutex);
		if (rwx->rwx_waiting) {
			rwx->rwx_waiting = B_FALSE;
			cv_broadcast(&rwx->rwx_cv);
		}
		mutex_exit(&rwx->rwx_mutex);
	}
	rw_exit(&rwx->rwx_lock);
}

/*
 * smb_rwx_rwupgrade
 */
krw_t
smb_rwx_rwupgrade(
    smb_rwx_t	*rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		return (RW_WRITER);
	}
	if (!rw_tryupgrade(&rwx->rwx_lock)) {
		rw_exit(&rwx->rwx_lock);
		rw_enter(&rwx->rwx_lock, RW_WRITER);
	}
	return (RW_READER);
}

/*
 * smb_rwx_rwdowngrade
 */
void
smb_rwx_rwdowngrade(
    smb_rwx_t	*rwx,
    krw_t	mode)
{
	ASSERT(rw_write_held(&rwx->rwx_lock));
	ASSERT(rw_owner(&rwx->rwx_lock) == curthread);

	if (mode == RW_WRITER) {
		return;
	}
	ASSERT(mode == RW_READER);
	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		rwx->rwx_waiting = B_FALSE;
		cv_broadcast(&rwx->rwx_cv);
	}
	mutex_exit(&rwx->rwx_mutex);
	rw_downgrade(&rwx->rwx_lock);
}

/*
 * smb_rwx_rwwait
 *
 * This function assumes the smb_rwx lock was entered in RW_READER or
 * RW_WRITER mode. It will:
 *
 *	1) release the lock and save its current mode.
 *	2) wait until the condition variable is signaled. This can happen
 *	   for two reasons: when a writer releases the lock or when the
 *	   timeout (if provided) expires.
 *	3) re-acquire the lock in the mode saved in (1).
 */
int
smb_rwx_rwwait(
    smb_rwx_t	*rwx,
    clock_t	timeout)
{
	int	rc = 1;	/* treat "already signaled" as success */
	krw_t	mode;

	mutex_enter(&rwx->rwx_mutex);
	rwx->rwx_waiting = B_TRUE;
	mutex_exit(&rwx->rwx_mutex);

	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mode = RW_WRITER;
	} else {
		ASSERT(rw_read_held(&rwx->rwx_lock));
		mode = RW_READER;
	}
	rw_exit(&rwx->rwx_lock);

	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		if (timeout == -1) {
			rc = 1;
			cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
		} else {
			rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
			    timeout, TR_CLOCK_TICK);
		}
	}
	mutex_exit(&rwx->rwx_mutex);

	rw_enter(&rwx->rwx_lock, mode);
	return (rc);
}
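
/*
 * Example (illustrative sketch, assuming the smb_rwx_rwenter() wrapper
 * from smb_ktypes.h; "obj", WANTED_STATE and "timeout" are hypothetical):
 * waiting for a state change protected by an smb_rwx_t. The lock mode
 * held on entry is restored when smb_rwx_rwwait() returns; a return of
 * -1 indicates the timeout expired.
 *
 *	smb_rwx_rwenter(&obj->rwx, RW_READER);
 *	while (obj->state != WANTED_STATE) {
 *		if (smb_rwx_rwwait(&obj->rwx, timeout) == -1)
 *			break;		timed out
 *	}
 *	smb_rwx_rwexit(&obj->rwx);
 */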

/*
 * SMB ID mapping
 *
 * The Solaris ID mapping service (aka Winchester) works with domain SIDs
 * and RIDs where domain SIDs are in string format. The CIFS service works
 * with binary SIDs understandable by CIFS clients. A layer of SMB ID
 * mapping functions is implemented to hide the SID conversion details
 * and also hide the handling of arrays of batch mapping requests.
 *
 * IMPORTANT NOTE: The Winchester API requires a zone. Because the CIFS
 * server currently runs only in the global zone, the global zone is
 * specified. This needs to be fixed when the CIFS server supports zones.
 */

static int smb_idmap_batch_binsid(smb_idmap_batch_t *sib);

/*
 * smb_idmap_getid
 *
 * Maps the given Windows SID to a Solaris ID using the
 * simple mapping API.
 */
idmap_stat
smb_idmap_getid(smb_sid_t *sid, uid_t *id, int *idtype)
{
	smb_idmap_t	sim;
	char		sidstr[SMB_SID_STRSZ];

	smb_sid_tostr(sid, sidstr);
	if (smb_sid_splitstr(sidstr, &sim.sim_rid) != 0)
		return (IDMAP_ERR_SID);
	sim.sim_domsid = sidstr;
	sim.sim_id = id;

	switch (*idtype) {
	case SMB_IDMAP_USER:
		sim.sim_stat = kidmap_getuidbysid(global_zone, sim.sim_domsid,
		    sim.sim_rid, sim.sim_id);
		break;

	case SMB_IDMAP_GROUP:
		sim.sim_stat = kidmap_getgidbysid(global_zone, sim.sim_domsid,
		    sim.sim_rid, sim.sim_id);
		break;

	case SMB_IDMAP_UNKNOWN:
		sim.sim_stat = kidmap_getpidbysid(global_zone, sim.sim_domsid,
		    sim.sim_rid, sim.sim_id, &sim.sim_idtype);
		break;

	default:
		ASSERT(0);
		return (IDMAP_ERR_ARG);
	}

	*idtype = sim.sim_idtype;

	return (sim.sim_stat);
}

/*
 * smb_idmap_getsid
 *
 * Maps the given Solaris ID to a Windows SID using the
 * simple mapping API.
 */
idmap_stat
smb_idmap_getsid(uid_t id, int idtype, smb_sid_t **sid)
{
	smb_idmap_t	sim;

	switch (idtype) {
	case SMB_IDMAP_USER:
		sim.sim_stat = kidmap_getsidbyuid(global_zone, id,
		    (const char **)&sim.sim_domsid, &sim.sim_rid);
		break;

	case SMB_IDMAP_GROUP:
		sim.sim_stat = kidmap_getsidbygid(global_zone, id,
		    (const char **)&sim.sim_domsid, &sim.sim_rid);
		break;

	case SMB_IDMAP_EVERYONE:
		/* Everyone S-1-1-0 */
		sim.sim_domsid = "S-1-1";
		sim.sim_rid = 0;
		sim.sim_stat = IDMAP_SUCCESS;
		break;

	default:
		ASSERT(0);
		return (IDMAP_ERR_ARG);
	}

	if (sim.sim_stat != IDMAP_SUCCESS)
		return (sim.sim_stat);

	if (sim.sim_domsid == NULL)
		return (IDMAP_ERR_NOMAPPING);

	sim.sim_sid = smb_sid_fromstr(sim.sim_domsid);
	if (sim.sim_sid == NULL)
		return (IDMAP_ERR_INTERNAL);

	*sid = smb_sid_splice(sim.sim_sid, sim.sim_rid);
	smb_sid_free(sim.sim_sid);
	if (*sid == NULL)
		sim.sim_stat = IDMAP_ERR_INTERNAL;

	return (sim.sim_stat);
}

/*
 * smb_idmap_batch_create
 *
 * Creates and initializes the context for batch ID mapping.
 */
idmap_stat
smb_idmap_batch_create(smb_idmap_batch_t *sib, uint16_t nmap, int flags)
{
	ASSERT(sib);

	bzero(sib, sizeof (smb_idmap_batch_t));

	sib->sib_idmaph = kidmap_get_create(global_zone);

	sib->sib_flags = flags;
	sib->sib_nmap = nmap;
	sib->sib_size = nmap * sizeof (smb_idmap_t);
	sib->sib_maps = kmem_zalloc(sib->sib_size, KM_SLEEP);

	return (IDMAP_SUCCESS);
}

/*
 * smb_idmap_batch_destroy
 *
 * Frees the batch ID mapping context.
 * If the ID mapping is Solaris -> Windows it frees the memory
 * allocated for binary SIDs.
 */
void
smb_idmap_batch_destroy(smb_idmap_batch_t *sib)
{
	char	*domsid;
	int	i;

	ASSERT(sib);
	ASSERT(sib->sib_maps);

	if (sib->sib_idmaph)
		kidmap_get_destroy(sib->sib_idmaph);

	if (sib->sib_flags & SMB_IDMAP_ID2SID) {
		/*
		 * SIDs are allocated only when mapping
		 * UID/GID to SIDs
		 */
		for (i = 0; i < sib->sib_nmap; i++)
			smb_sid_free(sib->sib_maps[i].sim_sid);
	} else if (sib->sib_flags & SMB_IDMAP_SID2ID) {
		/*
		 * SID prefixes are allocated only when mapping
		 * SIDs to UID/GID
		 */
		for (i = 0; i < sib->sib_nmap; i++) {
			domsid = sib->sib_maps[i].sim_domsid;
			if (domsid)
				smb_mem_free(domsid);
		}
	}

	if (sib->sib_size && sib->sib_maps)
		kmem_free(sib->sib_maps, sib->sib_size);
}

/*
 * smb_idmap_batch_getid
 *
 * Queue a request to map the given SID to a UID or GID.
 *
 * sim->sim_id should point to the variable that will hold the returned
 * UID/GID; it must be set up by the caller of this function.
 *
 * If the requested ID type is known it is passed as 'idtype'; if it is
 * unknown it will be returned in sim->sim_idtype.
 */
idmap_stat
smb_idmap_batch_getid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
    smb_sid_t *sid, int idtype)
{
	char		strsid[SMB_SID_STRSZ];
	idmap_stat	idm_stat;

	ASSERT(idmaph);
	ASSERT(sim);
	ASSERT(sid);

	smb_sid_tostr(sid, strsid);
	if (smb_sid_splitstr(strsid, &sim->sim_rid) != 0)
		return (IDMAP_ERR_SID);
	sim->sim_domsid = smb_mem_strdup(strsid);

	switch (idtype) {
	case SMB_IDMAP_USER:
		idm_stat = kidmap_batch_getuidbysid(idmaph, sim->sim_domsid,
		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
		break;

	case SMB_IDMAP_GROUP:
		idm_stat = kidmap_batch_getgidbysid(idmaph, sim->sim_domsid,
		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
		break;

	case SMB_IDMAP_UNKNOWN:
		idm_stat = kidmap_batch_getpidbysid(idmaph, sim->sim_domsid,
		    sim->sim_rid, sim->sim_id, &sim->sim_idtype,
		    &sim->sim_stat);
		break;

	default:
		ASSERT(0);
		return (IDMAP_ERR_ARG);
	}

	return (idm_stat);
}

/*
 * smb_idmap_batch_getsid
 *
 * Queue a request to map the given UID/GID to a SID.
 *
 * sim->sim_domsid and sim->sim_rid will contain the mapping
 * result upon successful processing of the batched request.
 */
idmap_stat
smb_idmap_batch_getsid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
    uid_t id, int idtype)
{
	idmap_stat	idm_stat;

	switch (idtype) {
	case SMB_IDMAP_USER:
		idm_stat = kidmap_batch_getsidbyuid(idmaph, id,
		    (const char **)&sim->sim_domsid, &sim->sim_rid,
		    &sim->sim_stat);
		break;

	case SMB_IDMAP_GROUP:
		idm_stat = kidmap_batch_getsidbygid(idmaph, id,
		    (const char **)&sim->sim_domsid, &sim->sim_rid,
		    &sim->sim_stat);
		break;

	case SMB_IDMAP_OWNERAT:
		/* Current Owner S-1-5-32-766 */
		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
		sim->sim_rid = SECURITY_CURRENT_OWNER_RID;
		sim->sim_stat = IDMAP_SUCCESS;
		idm_stat = IDMAP_SUCCESS;
		break;

	case SMB_IDMAP_GROUPAT:
		/* Current Group S-1-5-32-767 */
		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
		sim->sim_rid = SECURITY_CURRENT_GROUP_RID;
		sim->sim_stat = IDMAP_SUCCESS;
		idm_stat = IDMAP_SUCCESS;
		break;

	case SMB_IDMAP_EVERYONE:
		/* Everyone S-1-1-0 */
		sim->sim_domsid = NT_WORLD_AUTH_SIDSTR;
		sim->sim_rid = 0;
		sim->sim_stat = IDMAP_SUCCESS;
		idm_stat = IDMAP_SUCCESS;
		break;

	default:
		ASSERT(0);
		return (IDMAP_ERR_ARG);
	}

	return (idm_stat);
}

/*
 * smb_idmap_batch_binsid
 *
 * Convert the (domsid, rid) pairs to binary SIDs.
 *
 * Returns 0 if successful and non-zero upon failure.
 */
static int
smb_idmap_batch_binsid(smb_idmap_batch_t *sib)
{
	smb_sid_t	*sid;
	smb_idmap_t	*sim;
	int		i;

	if (sib->sib_flags & SMB_IDMAP_SID2ID)
		/* This operation is not required */
		return (0);

	sim = sib->sib_maps;
	for (i = 0; i < sib->sib_nmap; sim++, i++) {
		ASSERT(sim->sim_domsid);
		if (sim->sim_domsid == NULL)
			return (1);

		if ((sid = smb_sid_fromstr(sim->sim_domsid)) == NULL)
			return (1);

		sim->sim_sid = smb_sid_splice(sid, sim->sim_rid);
		smb_sid_free(sid);
	}

	return (0);
}

/*
 * smb_idmap_batch_getmappings
 *
 * Trigger the ID mapping service to get the mappings for the queued
 * requests.
 *
 * Checks the result of all the queued requests.
 * If this is a Solaris -> Windows mapping it generates
 * binary SIDs from the returned (domsid, rid) pairs.
 */
idmap_stat
smb_idmap_batch_getmappings(smb_idmap_batch_t *sib)
{
	idmap_stat	idm_stat = IDMAP_SUCCESS;
	int		i;

	idm_stat = kidmap_get_mappings(sib->sib_idmaph);
	if (idm_stat != IDMAP_SUCCESS)
		return (idm_stat);

	/*
	 * Check the status for all the queued requests
	 */
	for (i = 0; i < sib->sib_nmap; i++) {
		if (sib->sib_maps[i].sim_stat != IDMAP_SUCCESS)
			return (sib->sib_maps[i].sim_stat);
	}

	if (smb_idmap_batch_binsid(sib) != 0)
		idm_stat = IDMAP_ERR_OTHER;

	return (idm_stat);
}
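
/*
 * Example (illustrative sketch; "nmap", "sids" and "ids" are hypothetical
 * and error handling is elided): mapping a set of SIDs to UIDs/GIDs with
 * the batch interface.
 *
 *	smb_idmap_batch_t sib;
 *	int i;
 *
 *	(void) smb_idmap_batch_create(&sib, nmap, SMB_IDMAP_SID2ID);
 *	for (i = 0; i < nmap; i++) {
 *		sib.sib_maps[i].sim_id = &ids[i];
 *		(void) smb_idmap_batch_getid(sib.sib_idmaph,
 *		    &sib.sib_maps[i], sids[i], SMB_IDMAP_UNKNOWN);
 *	}
 *	if (smb_idmap_batch_getmappings(&sib) == IDMAP_SUCCESS)
 *		... ids[] now hold the mapped UIDs/GIDs ...
 *	smb_idmap_batch_destroy(&sib);
 */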

uint64_t
smb_time_unix_to_nt(timestruc_t *unix_time)
{
	uint64_t nt_time;

	if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
		return (0);

	nt_time = unix_time->tv_sec;
	nt_time *= 10000000;	/* seconds to 100ns */
	nt_time += unix_time->tv_nsec / 100;
	return (nt_time + NT_TIME_BIAS);
}

void
smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
{
	uint32_t seconds;

	ASSERT(unix_time);

	if ((nt_time == 0) || (nt_time == -1)) {
		unix_time->tv_sec = 0;
		unix_time->tv_nsec = 0;
		return;
	}

	nt_time -= NT_TIME_BIAS;
	seconds = nt_time / 10000000;
	unix_time->tv_sec = seconds;
	unix_time->tv_nsec = (nt_time % 10000000) * 100;
}
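
/*
 * Worked example (illustrative): a Unix time of tv_sec = 1, tv_nsec = 0
 * converts to 10000000 + NT_TIME_BIAS, i.e. one second (in 100ns units)
 * past the NT epoch of January 1, 1601. NT_TIME_BIAS is the number of
 * 100ns intervals between the NT epoch and the Unix epoch of 1970.
 * Note that an all-zero Unix time is passed through as the special
 * value 0 rather than being biased.
 */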
I don't 1720 * think this is really how it should be done but it's convenient for 1721 * now. 1722 */ 1723 time_t 1724 smb_timegm(struct tm *tm) 1725 { 1726 time_t tsec; 1727 int dd; 1728 int mm; 1729 int yy; 1730 int year; 1731 1732 if (tm == 0) 1733 return (-1); 1734 1735 year = tm->tm_year + TM_YEAR_BASE; 1736 tsec = tzh_leapcnt; 1737 1738 for (yy = EPOCH_YEAR; yy < year; ++yy) { 1739 if (isleap(yy)) 1740 tsec += SECSPERDAY * DAYSPERLYEAR; 1741 else 1742 tsec += SECSPERDAY * DAYSPERNYEAR; 1743 } 1744 1745 for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) { 1746 dd = days_in_month[mm] * SECSPERDAY; 1747 1748 if (mm == TM_FEBRUARY && isleap(year)) 1749 dd += SECSPERDAY; 1750 1751 tsec += dd; 1752 } 1753 1754 tsec += (tm->tm_mday - 1) * SECSPERDAY; 1755 tsec += tm->tm_sec; 1756 tsec += tm->tm_min * SECSPERMIN; 1757 tsec += tm->tm_hour * SECSPERHOUR; 1758 1759 tm->tm_isdst = 0; 1760 (void) smb_gmtime_r(&tsec, tm); 1761 return (tsec); 1762 } 1763 1764 /* 1765 * smb_pad_align 1766 * 1767 * Returns the number of bytes required to pad an offset to the 1768 * specified alignment. 1769 */ 1770 uint32_t 1771 smb_pad_align(uint32_t offset, uint32_t align) 1772 { 1773 uint32_t pad = offset % align; 1774 1775 if (pad != 0) 1776 pad = align - pad; 1777 1778 return (pad); 1779 } 1780 1781 /* 1782 * smb_panic 1783 * 1784 * Logs the file name, function name and line number passed in and panics the 1785 * system. 1786 */ 1787 void 1788 smb_panic(char *file, const char *func, int line) 1789 { 1790 cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line); 1791 } 1792 1793 /* 1794 * Creates an AVL tree and initializes the given smb_avl_t 1795 * structure using the passed args 1796 */ 1797 void 1798 smb_avl_create(smb_avl_t *avl, size_t size, size_t offset, smb_avl_nops_t *ops) 1799 { 1800 ASSERT(avl); 1801 ASSERT(ops); 1802 1803 rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL); 1804 mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL); 1805 1806 avl->avl_nops = ops; 1807 avl->avl_state = SMB_AVL_STATE_READY; 1808 avl->avl_refcnt = 0; 1809 (void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence, 1810 sizeof (uint32_t)); 1811 1812 avl_create(&avl->avl_tree, ops->avln_cmp, size, offset); 1813 } 1814 1815 /* 1816 * Destroys the specified AVL tree. 1817 * It waits for all the in-flight operations to finish 1818 * before destroying the AVL. 1819 */ 1820 void 1821 smb_avl_destroy(smb_avl_t *avl) 1822 { 1823 void *cookie = NULL; 1824 void *node; 1825 1826 ASSERT(avl); 1827 1828 mutex_enter(&avl->avl_mutex); 1829 if (avl->avl_state != SMB_AVL_STATE_READY) { 1830 mutex_exit(&avl->avl_mutex); 1831 return; 1832 } 1833 1834 avl->avl_state = SMB_AVL_STATE_DESTROYING; 1835 1836 while (avl->avl_refcnt > 0) 1837 (void) cv_wait(&avl->avl_cv, &avl->avl_mutex); 1838 mutex_exit(&avl->avl_mutex); 1839 1840 rw_enter(&avl->avl_lock, RW_WRITER); 1841 while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL) 1842 avl->avl_nops->avln_destroy(node); 1843 1844 avl_destroy(&avl->avl_tree); 1845 rw_exit(&avl->avl_lock); 1846 1847 rw_destroy(&avl->avl_lock); 1848 1849 mutex_destroy(&avl->avl_mutex); 1850 bzero(avl, sizeof (smb_avl_t)); 1851 } 1852 1853 /* 1854 * Adds the given item to the AVL if it's 1855 * not already there. 

/*
 * smb_gmtime_r
 *
 * Thread-safe version of smb_gmtime. Returns a null pointer if either
 * input parameter is a null pointer. Otherwise returns a pointer
 * to result.
 *
 * Day of the week calculation: the Epoch was a thursday.
 *
 * There are no timezone corrections so tm_isdst and tm_gmtoff are
 * always zero, and the zone is always WET.
 */
struct tm *
smb_gmtime_r(time_t *clock, struct tm *result)
{
	time_t tsec;
	int year;
	int month;
	int sec_per_month;

	if (clock == 0 || result == 0)
		return (0);

	bzero(result, sizeof (struct tm));
	tsec = *clock;
	tsec -= tzh_leapcnt;

	result->tm_wday = tsec / SECSPERDAY;
	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;

	year = EPOCH_YEAR;
	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
	    (SECSPERDAY * DAYSPERNYEAR))) {
		if (isleap(year))
			tsec -= SECSPERDAY * DAYSPERLYEAR;
		else
			tsec -= SECSPERDAY * DAYSPERNYEAR;

		++year;
	}

	result->tm_year = year - TM_YEAR_BASE;
	result->tm_yday = tsec / SECSPERDAY;

	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
		sec_per_month = days_in_month[month] * SECSPERDAY;

		if (month == TM_FEBRUARY && isleap(year))
			sec_per_month += SECSPERDAY;

		if (tsec < sec_per_month)
			break;

		tsec -= sec_per_month;
	}

	result->tm_mon = month;
	result->tm_mday = (tsec / SECSPERDAY) + 1;
	tsec %= SECSPERDAY;
	result->tm_sec = tsec % 60;
	tsec /= 60;
	result->tm_min = tsec % 60;
	tsec /= 60;
	result->tm_hour = (int)tsec;

	return (result);
}

/*
 * smb_timegm
 *
 * Converts the broken-down time in tm to a time value, i.e. the number
 * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
 * not a POSIX or ANSI function. Per the man page, the input values of
 * tm_wday and tm_yday are ignored and, as the input data is assumed to
 * represent GMT, we force tm_isdst and tm_gmtoff to 0.
 *
 * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
 * and tm_yday, and bring the other fields within normal range. I don't
 * think this is really how it should be done but it's convenient for
 * now.
 */
time_t
smb_timegm(struct tm *tm)
{
	time_t tsec;
	int dd;
	int mm;
	int yy;
	int year;

	if (tm == 0)
		return (-1);

	year = tm->tm_year + TM_YEAR_BASE;
	tsec = tzh_leapcnt;

	for (yy = EPOCH_YEAR; yy < year; ++yy) {
		if (isleap(yy))
			tsec += SECSPERDAY * DAYSPERLYEAR;
		else
			tsec += SECSPERDAY * DAYSPERNYEAR;
	}

	for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
		dd = days_in_month[mm] * SECSPERDAY;

		if (mm == TM_FEBRUARY && isleap(year))
			dd += SECSPERDAY;

		tsec += dd;
	}

	tsec += (tm->tm_mday - 1) * SECSPERDAY;
	tsec += tm->tm_sec;
	tsec += tm->tm_min * SECSPERMIN;
	tsec += tm->tm_hour * SECSPERHOUR;

	tm->tm_isdst = 0;
	(void) smb_gmtime_r(&tsec, tm);
	return (tsec);
}

/*
 * smb_pad_align
 *
 * Returns the number of bytes required to pad an offset to the
 * specified alignment.
 */
uint32_t
smb_pad_align(uint32_t offset, uint32_t align)
{
	uint32_t pad = offset % align;

	if (pad != 0)
		pad = align - pad;

	return (pad);
}
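
/*
 * Examples (illustrative): smb_pad_align(3, 4) returns 1 (3 + 1 = 4),
 * smb_pad_align(8, 4) returns 0 (already aligned), and
 * smb_pad_align(10, 8) returns 6 (10 + 6 = 16).
 */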

/*
 * smb_panic
 *
 * Logs the file name, function name and line number passed in and panics
 * the system.
 */
void
smb_panic(char *file, const char *func, int line)
{
	cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
}

/*
 * Creates an AVL tree and initializes the given smb_avl_t
 * structure using the passed args
 */
void
smb_avl_create(smb_avl_t *avl, size_t size, size_t offset, smb_avl_nops_t *ops)
{
	ASSERT(avl);
	ASSERT(ops);

	rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL);

	avl->avl_nops = ops;
	avl->avl_state = SMB_AVL_STATE_READY;
	avl->avl_refcnt = 0;
	(void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence,
	    sizeof (uint32_t));

	avl_create(&avl->avl_tree, ops->avln_cmp, size, offset);
}

/*
 * Destroys the specified AVL tree.
 * It waits for all the in-flight operations to finish
 * before destroying the AVL.
 */
void
smb_avl_destroy(smb_avl_t *avl)
{
	void *cookie = NULL;
	void *node;

	ASSERT(avl);

	mutex_enter(&avl->avl_mutex);
	if (avl->avl_state != SMB_AVL_STATE_READY) {
		mutex_exit(&avl->avl_mutex);
		return;
	}

	avl->avl_state = SMB_AVL_STATE_DESTROYING;

	while (avl->avl_refcnt > 0)
		(void) cv_wait(&avl->avl_cv, &avl->avl_mutex);
	mutex_exit(&avl->avl_mutex);

	rw_enter(&avl->avl_lock, RW_WRITER);
	while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL)
		avl->avl_nops->avln_destroy(node);

	avl_destroy(&avl->avl_tree);
	rw_exit(&avl->avl_lock);

	rw_destroy(&avl->avl_lock);

	mutex_destroy(&avl->avl_mutex);
	bzero(avl, sizeof (smb_avl_t));
}

/*
 * Adds the given item to the AVL if it's
 * not already there.
 *
 * Returns:
 *
 *	ENOTACTIVE	AVL is not in READY state
 *	EEXIST		The item is already in AVL
 */
int
smb_avl_add(smb_avl_t *avl, void *item)
{
	avl_index_t where;

	ASSERT(avl);
	ASSERT(item);

	if (!smb_avl_hold(avl))
		return (ENOTACTIVE);

	rw_enter(&avl->avl_lock, RW_WRITER);
	if (avl_find(&avl->avl_tree, item, &where) != NULL) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return (EEXIST);
	}

	avl_insert(&avl->avl_tree, item, where);
	avl->avl_sequence++;
	rw_exit(&avl->avl_lock);

	smb_avl_rele(avl);
	return (0);
}

/*
 * Removes the given item from the AVL.
 * If no reference is left on the item
 * it will also be destroyed by calling the
 * registered destroy operation.
 */
void
smb_avl_remove(smb_avl_t *avl, void *item)
{
	avl_index_t where;
	void *rm_item;

	ASSERT(avl);
	ASSERT(item);

	if (!smb_avl_hold(avl))
		return;

	rw_enter(&avl->avl_lock, RW_WRITER);
	if ((rm_item = avl_find(&avl->avl_tree, item, &where)) == NULL) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return;
	}

	avl_remove(&avl->avl_tree, rm_item);
	if (avl->avl_nops->avln_rele(rm_item))
		avl->avl_nops->avln_destroy(rm_item);
	avl->avl_sequence++;
	rw_exit(&avl->avl_lock);

	smb_avl_rele(avl);
}

/*
 * Looks up the AVL for the given item.
 * If the item is found a hold on the object
 * is taken before the pointer to it is
 * returned to the caller. The caller MUST
 * always call smb_avl_release() after it's done
 * using the returned object to release the hold
 * taken on the object.
 */
void *
smb_avl_lookup(smb_avl_t *avl, void *item)
{
	void *node = NULL;

	ASSERT(avl);
	ASSERT(item);

	if (!smb_avl_hold(avl))
		return (NULL);

	rw_enter(&avl->avl_lock, RW_READER);
	node = avl_find(&avl->avl_tree, item, NULL);
	if (node != NULL)
		avl->avl_nops->avln_hold(node);
	rw_exit(&avl->avl_lock);

	if (node == NULL)
		smb_avl_rele(avl);

	return (node);
}

/*
 * The hold on the given object is released.
 * This function MUST always be called after
 * smb_avl_lookup() and smb_avl_iterate() for
 * the returned object.
 *
 * If the AVL is in DESTROYING state, the destroying
 * thread will be notified.
 */
void
smb_avl_release(smb_avl_t *avl, void *item)
{
	ASSERT(avl);
	ASSERT(item);

	if (avl->avl_nops->avln_rele(item))
		avl->avl_nops->avln_destroy(item);

	smb_avl_rele(avl);
}

/*
 * Initializes the given cursor for the AVL.
 * The cursor will be used to iterate through the AVL
 */
void
smb_avl_iterinit(smb_avl_t *avl, smb_avl_cursor_t *cursor)
{
	ASSERT(avl);
	ASSERT(cursor);

	cursor->avlc_next = NULL;
	cursor->avlc_sequence = avl->avl_sequence;
}

/*
 * Iterates through the AVL using the given cursor.
 * It always starts at the beginning and then returns
 * a pointer to the next object on each subsequent call.
 *
 * If a new object is added to or removed from the AVL
 * between two calls to this function, the iteration
 * will terminate prematurely.
 *
 * The caller MUST always call smb_avl_release() after it's
 * done using the returned object to release the hold taken
 * on the object.
 */
void *
smb_avl_iterate(smb_avl_t *avl, smb_avl_cursor_t *cursor)
{
	void *node;

	ASSERT(avl);
	ASSERT(cursor);

	if (!smb_avl_hold(avl))
		return (NULL);

	rw_enter(&avl->avl_lock, RW_READER);
	if (cursor->avlc_sequence != avl->avl_sequence) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return (NULL);
	}

	if (cursor->avlc_next == NULL)
		node = avl_first(&avl->avl_tree);
	else
		node = AVL_NEXT(&avl->avl_tree, cursor->avlc_next);

	if (node != NULL)
		avl->avl_nops->avln_hold(node);

	cursor->avlc_next = node;
	rw_exit(&avl->avl_lock);

	if (node == NULL)
		smb_avl_rele(avl);

	return (node);
}

/*
 * Increments the AVL reference count in order to
 * prevent the AVL from being destroyed while it's
 * being accessed.
 */
static boolean_t
smb_avl_hold(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	if (avl->avl_state != SMB_AVL_STATE_READY) {
		mutex_exit(&avl->avl_mutex);
		return (B_FALSE);
	}
	avl->avl_refcnt++;
	mutex_exit(&avl->avl_mutex);

	return (B_TRUE);
}

/*
 * Decrements the AVL reference count to release the
 * hold. If another thread is trying to destroy the
 * AVL and is waiting for the reference count to become
 * 0, it is signaled to wake up.
 */
static void
smb_avl_rele(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	ASSERT(avl->avl_refcnt > 0);
	avl->avl_refcnt--;
	if (avl->avl_state == SMB_AVL_STATE_DESTROYING)
		cv_broadcast(&avl->avl_cv);
	mutex_exit(&avl->avl_mutex);
}
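
/*
 * Example (illustrative sketch; "avl" is an initialized smb_avl_t):
 * iterating an smb_avl_t. Every object returned by smb_avl_iterate()
 * holds a reference and must be released; the iteration ends early if
 * the tree's sequence number changes.
 *
 *	smb_avl_cursor_t cursor;
 *	void *node;
 *
 *	smb_avl_iterinit(&avl, &cursor);
 *	while ((node = smb_avl_iterate(&avl, &cursor)) != NULL) {
 *		... examine node ...
 *		smb_avl_release(&avl, node);
 *	}
 */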

/*
 * smb_latency_init
 */
void
smb_latency_init(smb_latency_t *lat)
{
	bzero(lat, sizeof (*lat));
	mutex_init(&lat->ly_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
}

/*
 * smb_latency_destroy
 */
void
smb_latency_destroy(smb_latency_t *lat)
{
	mutex_destroy(&lat->ly_mutex);
}

/*
 * smb_latency_add_sample
 *
 * Uses the new sample to calculate the new mean and standard deviation.
 * The sample must be a scaled value.
 */
void
smb_latency_add_sample(smb_latency_t *lat, hrtime_t sample)
{
	hrtime_t	a_mean;
	hrtime_t	d_mean;

	mutex_enter(&lat->ly_mutex);
	lat->ly_a_nreq++;
	lat->ly_a_sum += sample;
	if (lat->ly_a_nreq != 0) {
		a_mean = lat->ly_a_sum / lat->ly_a_nreq;
		lat->ly_a_stddev =
		    (sample - a_mean) * (sample - lat->ly_a_mean);
		lat->ly_a_mean = a_mean;
	}
	lat->ly_d_nreq++;
	lat->ly_d_sum += sample;
	if (lat->ly_d_nreq != 0) {
		d_mean = lat->ly_d_sum / lat->ly_d_nreq;
		lat->ly_d_stddev =
		    (sample - d_mean) * (sample - lat->ly_d_mean);
		lat->ly_d_mean = d_mean;
	}
	mutex_exit(&lat->ly_mutex);
}

/*
 * smb_srqueue_init
 */
void
smb_srqueue_init(smb_srqueue_t *srq)
{
	bzero(srq, sizeof (*srq));
	mutex_init(&srq->srq_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
	srq->srq_wlastupdate = srq->srq_rlastupdate = gethrtime_unscaled();
}

/*
 * smb_srqueue_destroy
 */
void
smb_srqueue_destroy(smb_srqueue_t *srq)
{
	mutex_destroy(&srq->srq_mutex);
}

/*
 * smb_srqueue_waitq_enter
 */
void
smb_srqueue_waitq_enter(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	wcnt = srq->srq_wcnt++;
	if (wcnt != 0) {
		srq->srq_wlentime += delta * wcnt;
		srq->srq_wtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}

/*
 * smb_srqueue_runq_exit
 */
void
smb_srqueue_runq_exit(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	rcnt = srq->srq_rcnt--;
	ASSERT(rcnt > 0);
	srq->srq_rlentime += delta * rcnt;
	srq->srq_rtime += delta;
	mutex_exit(&srq->srq_mutex);
}

/*
 * smb_srqueue_waitq_to_runq
 */
void
smb_srqueue_waitq_to_runq(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	wcnt = srq->srq_wcnt--;
	ASSERT(wcnt > 0);
	srq->srq_wlentime += delta * wcnt;
	srq->srq_wtime += delta;
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	rcnt = srq->srq_rcnt++;
	if (rcnt != 0) {
		srq->srq_rlentime += delta * rcnt;
		srq->srq_rtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}

/*
 * smb_srqueue_update
 *
 * Takes a snapshot of the srqueue counters into the
 * smb_kstat_utilization_t structure passed in.
 */
void
smb_srqueue_update(smb_srqueue_t *srq, smb_kstat_utilization_t *kd)
{
	hrtime_t	delta;
	hrtime_t	snaptime;

	mutex_enter(&srq->srq_mutex);
	snaptime = gethrtime_unscaled();
	delta = snaptime - srq->srq_wlastupdate;
	srq->srq_wlastupdate = snaptime;
	if (srq->srq_wcnt != 0) {
		srq->srq_wlentime += delta * srq->srq_wcnt;
		srq->srq_wtime += delta;
	}
	delta = snaptime - srq->srq_rlastupdate;
	srq->srq_rlastupdate = snaptime;
	if (srq->srq_rcnt != 0) {
		srq->srq_rlentime += delta * srq->srq_rcnt;
		srq->srq_rtime += delta;
	}
	kd->ku_rlentime = srq->srq_rlentime;
	kd->ku_rtime = srq->srq_rtime;
	kd->ku_wlentime = srq->srq_wlentime;
	kd->ku_wtime = srq->srq_wtime;
	mutex_exit(&srq->srq_mutex);
	scalehrtime(&kd->ku_rlentime);
	scalehrtime(&kd->ku_rtime);
	scalehrtime(&kd->ku_wlentime);
	scalehrtime(&kd->ku_wtime);
}
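
/*
 * Note (illustrative, assuming the usual kstat I/O accounting
 * conventions): the *lentime fields accumulate queue-length times
 * elapsed-time products and the *time fields accumulate time during
 * which the queue was non-empty. Over a sampling interval a consumer
 * can therefore derive, for example, the average run queue length as
 * (delta ku_rlentime / delta elapsed time) and the fraction of time
 * the run queue was busy as (delta ku_rtime / delta elapsed time).
 */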

void
smb_threshold_init(smb_cmd_threshold_t *ct, char *cmd, int threshold,
    int timeout)
{
	bzero(ct, sizeof (smb_cmd_threshold_t));
	mutex_init(&ct->ct_mutex, NULL, MUTEX_DEFAULT, NULL);
	ct->ct_cmd = cmd;
	ct->ct_threshold = threshold;
	ct->ct_event = smb_event_create(timeout);
	ct->ct_event_id = smb_event_txid(ct->ct_event);

	if (smb_threshold_debug) {
		cmn_err(CE_NOTE, "smb_threshold_init[%s]: threshold (%d), "
		    "timeout (%d)", cmd, threshold, timeout);
	}
}

/*
 * This function must be called prior to the SMB_SERVER_STATE_STOPPING
 * state so that ct_event can be successfully removed from the event list.
 * It should not be called when the server mutex is held or when the
 * server is removed from the server list.
 */
void
smb_threshold_fini(smb_cmd_threshold_t *ct)
{
	smb_event_destroy(ct->ct_event);
	mutex_destroy(&ct->ct_mutex);
	bzero(ct, sizeof (smb_cmd_threshold_t));
}

/*
 * This threshold mechanism can be used to limit the number of simultaneous
 * requests, which serves to limit the stress that can be applied to the
 * service and also allows the service to respond to requests before the
 * client times out and reports that the server is not responding.
 *
 * If the number of requests exceeds the threshold, new requests will be
 * stalled until the number drops back to the threshold. Stalled requests
 * will be notified as appropriate, in which case 0 will be returned.
 * If the timeout expires before the request is notified, a non-zero errno
 * value will be returned.
 *
 * To avoid a flood of messages, the message rate is throttled as well.
 */
int
smb_threshold_enter(smb_cmd_threshold_t *ct)
{
	int	rc;

	mutex_enter(&ct->ct_mutex);
	if (ct->ct_active_cnt >= ct->ct_threshold && ct->ct_event != NULL) {
		atomic_inc_32(&ct->ct_blocked_cnt);

		if (smb_threshold_debug) {
			cmn_err(CE_NOTE, "smb_threshold_enter[%s]: blocked "
			    "(blocked ops: %u, inflight ops: %u)",
			    ct->ct_cmd, ct->ct_blocked_cnt, ct->ct_active_cnt);
		}

		mutex_exit(&ct->ct_mutex);

		if ((rc = smb_event_wait(ct->ct_event)) != 0) {
			if (rc == ECANCELED)
				return (rc);

			mutex_enter(&ct->ct_mutex);
			if (ct->ct_active_cnt >= ct->ct_threshold) {

				if ((ct->ct_error_cnt %
				    SMB_THRESHOLD_REPORT_THROTTLE) == 0) {
					cmn_err(CE_NOTE, "%s: server busy: "
					    "threshold %d exceeded",
					    ct->ct_cmd, ct->ct_threshold);
				}

				atomic_inc_32(&ct->ct_error_cnt);
				mutex_exit(&ct->ct_mutex);
				return (rc);
			}

			mutex_exit(&ct->ct_mutex);
		}

		mutex_enter(&ct->ct_mutex);
		atomic_dec_32(&ct->ct_blocked_cnt);
		if (smb_threshold_debug) {
			cmn_err(CE_NOTE, "smb_threshold_enter[%s]: resumed "
			    "(blocked ops: %u, inflight ops: %u)", ct->ct_cmd,
			    ct->ct_blocked_cnt, ct->ct_active_cnt);
		}
	}

	atomic_inc_32(&ct->ct_active_cnt);
	mutex_exit(&ct->ct_mutex);
	return (0);
}

void
smb_threshold_exit(smb_cmd_threshold_t *ct, smb_server_t *sv)
{
	mutex_enter(&ct->ct_mutex);
	atomic_dec_32(&ct->ct_active_cnt);
	mutex_exit(&ct->ct_mutex);
	smb_event_notify(sv, ct->ct_event_id);
}
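
/*
 * Example (illustrative sketch; "ct" is an initialized threshold and
 * "sv" is the owning smb_server_t): bracketing request processing with
 * the threshold mechanism.
 *
 *	if (smb_threshold_enter(&ct) != 0)
 *		return;		server busy or shutting down
 *
 *	... process the request ...
 *
 *	smb_threshold_exit(&ct, sv);
 */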