/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/tzfile.h>
#include <sys/atomic.h>
#include <sys/kidmap.h>
#include <sys/time.h>
#include <sys/spl.h>
#include <sys/cpuvar.h>
#include <sys/random.h>
#include <smbsrv/smb_kproto.h>
#include <smbsrv/smb_fsops.h>
#include <smbsrv/smbinfo.h>
#include <smbsrv/smb_xdr.h>
#include <smbsrv/smb_vops.h>
#include <smbsrv/smb_idmap.h>

#include <sys/sid.h>
#include <sys/priv_names.h>

static kmem_cache_t	*smb_dtor_cache;
static boolean_t	smb_llist_initialized = B_FALSE;

static boolean_t smb_thread_continue_timedwait_locked(smb_thread_t *, int);

static boolean_t smb_avl_hold(smb_avl_t *);
static void smb_avl_rele(smb_avl_t *);

time_t tzh_leapcnt = 0;

struct tm
*smb_gmtime_r(time_t *clock, struct tm *result);

time_t
smb_timegm(struct tm *tm);

struct	tm {
	int	tm_sec;
	int	tm_min;
	int	tm_hour;
	int	tm_mday;
	int	tm_mon;
	int	tm_year;
	int	tm_wday;
	int	tm_yday;
	int	tm_isdst;
};

static int days_in_month[] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

int
smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (smb_wcequiv_strlen(str));
	return (strlen(str));
}

int
smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (smb_wcequiv_strlen(str) + 2);
	return (strlen(str) + 1);
}

int
smb_ascii_or_unicode_null_len(struct smb_request *sr)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (2);
	return (1);
}

/*
 * Convert old-style (DOS, LanMan) wildcard strings to NT style.
 * This should ONLY happen to patterns that come from old clients,
 * meaning dialect LANMAN2_1 etc. (dialect < NT_LM_0_12).
 *
 *	? is converted to >
 *	* is converted to < if it is followed by .
 *	. is converted to " if it is followed by ? or * or end of pattern
 *
 * Note: modifies pattern in place.
 */
void
smb_convert_wildcards(char *pattern)
{
	char	*p;

	for (p = pattern; *p != '\0'; p++) {
		switch (*p) {
		case '?':
			*p = '>';
			break;
		case '*':
			if (p[1] == '.')
				*p = '<';
			break;
		case '.':
			if (p[1] == '?' || p[1] == '*' || p[1] == '\0')
				*p = '\"';
			break;
		}
	}
}
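/*
 * Illustrative sketch (not part of the original source): the effect of
 * smb_convert_wildcards() on a few old-style patterns, following the
 * rules above.
 *
 *	char p1[] = "*.txt";	smb_convert_wildcards(p1);   p1 is <.txt
 *	char p2[] = "a?.c";	smb_convert_wildcards(p2);   p2 is a>.c
 *	char p3[] = "data.*";	smb_convert_wildcards(p3);   p3 is data"*
 */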
/*
 * smb_sattr_check
 *
 * Check file attributes against a search attribute (sattr) mask.
 *
 * Normal files, which includes READONLY and ARCHIVE, always pass
 * this check.  If the DIRECTORY, HIDDEN or SYSTEM special attributes
 * are set then they must appear in the search mask.  The special
 * attributes are inclusive, i.e. all special attributes that are set
 * on the file must also appear in sattr for the check to pass.
 *
 * The following examples show how this works:
 *
 *	fileA:	READONLY
 *	fileB:	0 (no attributes = normal file)
 *	fileC:	READONLY, ARCHIVE
 *	fileD:	HIDDEN
 *	fileE:	READONLY, HIDDEN, SYSTEM
 *	dirA:	DIRECTORY
 *
 * search attribute: 0
 *	Returns: fileA, fileB and fileC.
 * search attribute: HIDDEN
 *	Returns: fileA, fileB, fileC and fileD.
 * search attribute: SYSTEM
 *	Returns: fileA, fileB and fileC.
 * search attribute: DIRECTORY
 *	Returns: fileA, fileB, fileC and dirA.
 * search attribute: HIDDEN and SYSTEM
 *	Returns: fileA, fileB, fileC, fileD and fileE.
 *
 * Returns true if the file and sattr match; otherwise, returns false.
 */
boolean_t
smb_sattr_check(uint16_t dosattr, uint16_t sattr)
{
	if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
	    !(sattr & FILE_ATTRIBUTE_DIRECTORY))
		return (B_FALSE);

	if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
	    !(sattr & FILE_ATTRIBUTE_HIDDEN))
		return (B_FALSE);

	if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
	    !(sattr & FILE_ATTRIBUTE_SYSTEM))
		return (B_FALSE);

	return (B_TRUE);
}
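/*
 * Illustrative sketch (not part of the original source): evaluating two
 * of the fileE cases from the table above.
 *
 *	uint16_t fileE = FILE_ATTRIBUTE_READONLY | FILE_ATTRIBUTE_HIDDEN |
 *	    FILE_ATTRIBUTE_SYSTEM;
 *
 *	smb_sattr_check(fileE, FILE_ATTRIBUTE_HIDDEN);
 *		B_FALSE: SYSTEM is set on the file but absent from sattr.
 *	smb_sattr_check(fileE,
 *	    FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM);
 *		B_TRUE: every special attribute on the file is in sattr.
 */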
int
microtime(timestruc_t *tvp)
{
	tvp->tv_sec = gethrestime_sec();
	tvp->tv_nsec = 0;
	return (0);
}

int32_t
clock_get_milli_uptime()
{
	return (TICK_TO_MSEC(ddi_get_lbolt()));
}

/*
 * smb_idpool_increment
 *
 * This function grows the ID pool by doubling its current size. This
 * function assumes the caller entered the mutex of the pool.
 */
static int
smb_idpool_increment(
    smb_idpool_t	*pool)
{
	uint8_t		*new_pool;
	uint32_t	new_size;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	new_size = pool->id_size * 2;
	if (new_size <= SMB_IDPOOL_MAX_SIZE) {
		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
		if (new_pool) {
			bzero(new_pool, new_size / 8);
			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
			kmem_free(pool->id_pool, pool->id_size / 8);
			pool->id_pool = new_pool;
			pool->id_free_counter += new_size - pool->id_size;
			pool->id_max_free_counter += new_size - pool->id_size;
			pool->id_size = new_size;
			pool->id_idx_msk = (new_size / 8) - 1;
			if (new_size >= SMB_IDPOOL_MAX_SIZE) {
				/* id -1 made unavailable */
				pool->id_pool[pool->id_idx_msk] = 0x80;
				pool->id_free_counter--;
				pool->id_max_free_counter--;
			}
			return (0);
		}
	}
	return (-1);
}

/*
 * smb_idpool_constructor
 *
 * This function initializes the pool structure provided.
 */
int
smb_idpool_constructor(
    smb_idpool_t	*pool)
{

	ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);

	pool->id_size = SMB_IDPOOL_MIN_SIZE;
	pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
	pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_bit = 0x02;
	pool->id_bit_idx = 1;
	pool->id_idx = 0;
	pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
	    KM_SLEEP);
	bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
	/* -1 id made unavailable */
	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
	pool->id_magic = SMB_IDPOOL_MAGIC;
	return (0);
}

/*
 * smb_idpool_destructor
 *
 * This function tears down and frees the resources associated with the
 * pool provided.
 */
void
smb_idpool_destructor(
    smb_idpool_t	*pool)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
	pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
	mutex_destroy(&pool->id_mutex);
	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
}

/*
 * smb_idpool_alloc
 *
 * This function allocates an ID from the pool provided.
 */
int
smb_idpool_alloc(
    smb_idpool_t	*pool,
    uint16_t		*id)
{
	uint32_t	i;
	uint8_t		bit;
	uint8_t		bit_idx;
	uint8_t		byte;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	mutex_enter(&pool->id_mutex);
	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
		mutex_exit(&pool->id_mutex);
		return (-1);
	}

	i = pool->id_size;
	while (i) {
		bit = pool->id_bit;
		bit_idx = pool->id_bit_idx;
		byte = pool->id_pool[pool->id_idx];
		while (bit) {
			if (byte & bit) {
				bit = bit << 1;
				bit_idx++;
				continue;
			}
			pool->id_pool[pool->id_idx] |= bit;
			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
			pool->id_free_counter--;
			pool->id_bit = bit;
			pool->id_bit_idx = bit_idx;
			mutex_exit(&pool->id_mutex);
			return (0);
		}
		pool->id_bit = 1;
		pool->id_bit_idx = 0;
		pool->id_idx++;
		pool->id_idx &= pool->id_idx_msk;
		--i;
	}
	/*
	 * This section of code shouldn't be reached. If there are IDs
	 * available and none could be found there's a problem.
	 */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
	return (-1);
}

/*
 * smb_idpool_free
 *
 * This function frees the ID provided.
 */
void
smb_idpool_free(
    smb_idpool_t	*pool,
    uint16_t		id)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(id != 0);
	ASSERT(id != 0xFFFF);

	mutex_enter(&pool->id_mutex);
	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
		pool->id_free_counter++;
		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
		mutex_exit(&pool->id_mutex);
		return;
	}
	/* Freeing a free ID. */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
}
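/*
 * Illustrative sketch (not part of the original source): the typical
 * lifecycle of an ID pool.  IDs 0 and 0xFFFF are never handed out.
 *
 *	smb_idpool_t	pool;
 *	uint16_t	id;
 *
 *	(void) smb_idpool_constructor(&pool);
 *	if (smb_idpool_alloc(&pool, &id) == 0) {
 *		... use id (1 .. 0xFFFE) ...
 *		smb_idpool_free(&pool, id);
 *	}
 *	smb_idpool_destructor(&pool);
 */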
/*
 * Initialize the llist delete queue object cache.
 */
void
smb_llist_init(void)
{
	if (smb_llist_initialized)
		return;

	smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
	    sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);

	smb_llist_initialized = B_TRUE;
}

/*
 * Destroy the llist delete queue object cache.
 */
void
smb_llist_fini(void)
{
	if (!smb_llist_initialized)
		return;

	kmem_cache_destroy(smb_dtor_cache);
	smb_llist_initialized = B_FALSE;
}

/*
 * smb_llist_constructor
 *
 * This function initializes a locked list.
 */
void
smb_llist_constructor(
    smb_llist_t	*ll,
    size_t	size,
    size_t	offset)
{
	rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
	list_create(&ll->ll_list, size, offset);
	list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
	    offsetof(smb_dtor_t, dt_lnd));
	ll->ll_count = 0;
	ll->ll_wrop = 0;
	ll->ll_deleteq_count = 0;
	ll->ll_flushing = B_FALSE;
}

/*
 * Flush the delete queue and destroy a locked list.
 */
void
smb_llist_destructor(
    smb_llist_t	*ll)
{
	smb_llist_flush(ll);

	ASSERT(ll->ll_count == 0);
	ASSERT(ll->ll_deleteq_count == 0);

	rw_destroy(&ll->ll_lock);
	list_destroy(&ll->ll_list);
	list_destroy(&ll->ll_deleteq);
	mutex_destroy(&ll->ll_mutex);
}

/*
 * Post an object to the delete queue.  The delete queue will be processed
 * during list exit or list destruction.  Objects are often posted for
 * deletion during list iteration (while the list is locked) but that is
 * not required, and an object can be posted at any time.
 */
void
smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
{
	smb_dtor_t	*dtor;

	ASSERT((object != NULL) && (dtorproc != NULL));

	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
	bzero(dtor, sizeof (smb_dtor_t));
	dtor->dt_magic = SMB_DTOR_MAGIC;
	dtor->dt_object = object;
	dtor->dt_proc = dtorproc;

	mutex_enter(&ll->ll_mutex);
	list_insert_tail(&ll->ll_deleteq, dtor);
	++ll->ll_deleteq_count;
	mutex_exit(&ll->ll_mutex);
}

/*
 * Exit the list lock and process the delete queue.
 */
void
smb_llist_exit(smb_llist_t *ll)
{
	rw_exit(&ll->ll_lock);
	smb_llist_flush(ll);
}

/*
 * Flush the list delete queue.  The mutex is dropped across the destructor
 * call in case this leads to additional objects being posted to the delete
 * queue.
 */
void
smb_llist_flush(smb_llist_t *ll)
{
	smb_dtor_t	*dtor;

	mutex_enter(&ll->ll_mutex);
	if (ll->ll_flushing) {
		mutex_exit(&ll->ll_mutex);
		return;
	}
	ll->ll_flushing = B_TRUE;

	dtor = list_head(&ll->ll_deleteq);
	while (dtor != NULL) {
		SMB_DTOR_VALID(dtor);
		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
		list_remove(&ll->ll_deleteq, dtor);
		--ll->ll_deleteq_count;
		mutex_exit(&ll->ll_mutex);

		dtor->dt_proc(dtor->dt_object);

		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
		kmem_cache_free(smb_dtor_cache, dtor);
		mutex_enter(&ll->ll_mutex);
		dtor = list_head(&ll->ll_deleteq);
	}
	ll->ll_flushing = B_FALSE;

	mutex_exit(&ll->ll_mutex);
}
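/*
 * Illustrative sketch (not part of the original source): posting objects
 * for deferred deletion while iterating a locked list.  This assumes the
 * usual smb_llist_enter()/smb_llist_head()/smb_llist_next() helpers from
 * smb_ktypes.h; my_node_t and my_node_free are hypothetical names.
 *
 *	my_node_t *node;
 *
 *	smb_llist_enter(&list, RW_READER);
 *	node = smb_llist_head(&list);
 *	while (node != NULL) {
 *		if (... node should be deleted ...)
 *			smb_llist_post(&list, node,
 *			    (smb_dtorproc_t)my_node_free);
 *		node = smb_llist_next(&list, node);
 *	}
 *	smb_llist_exit(&list);		also flushes the delete queue
 */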
/*
 * smb_llist_upgrade
 *
 * This function tries to upgrade the lock of the locked list. It assumes
 * the lock has already been entered in RW_READER mode. It first tries using
 * the Solaris function rw_tryupgrade(). If that call fails the lock is
 * released and reentered in RW_WRITER mode. In that last case a window is
 * opened during which the contents of the list may have changed. The return
 * code indicates whether or not the list was modified while the lock was
 * dropped.
 */
int
smb_llist_upgrade(
    smb_llist_t	*ll)
{
	uint64_t	wrop;

	if (rw_tryupgrade(&ll->ll_lock) != 0) {
		return (0);
	}
	wrop = ll->ll_wrop;
	rw_exit(&ll->ll_lock);
	rw_enter(&ll->ll_lock, RW_WRITER);
	return (wrop != ll->ll_wrop);
}

/*
 * smb_llist_insert_head
 *
 * This function inserts the object passed at the beginning of the list.
 * This function assumes the lock of the list has already been entered.
 */
void
smb_llist_insert_head(
    smb_llist_t	*ll,
    void	*obj)
{
	list_insert_head(&ll->ll_list, obj);
	++ll->ll_wrop;
	++ll->ll_count;
}

/*
 * smb_llist_insert_tail
 *
 * This function appends the object passed to the list. This function
 * assumes the lock of the list has already been entered.
 */
void
smb_llist_insert_tail(
    smb_llist_t	*ll,
    void	*obj)
{
	list_insert_tail(&ll->ll_list, obj);
	++ll->ll_wrop;
	++ll->ll_count;
}

/*
 * smb_llist_remove
 *
 * This function removes the object passed from the list. This function
 * assumes the lock of the list has already been entered.
 */
void
smb_llist_remove(
    smb_llist_t	*ll,
    void	*obj)
{
	list_remove(&ll->ll_list, obj);
	++ll->ll_wrop;
	--ll->ll_count;
}

/*
 * smb_llist_get_count
 *
 * This function returns the number of elements in the specified list.
 */
uint32_t
smb_llist_get_count(
    smb_llist_t	*ll)
{
	return (ll->ll_count);
}

/*
 * smb_slist_constructor
 *
 * Synchronized list constructor.
 */
void
smb_slist_constructor(
    smb_slist_t	*sl,
    size_t	size,
    size_t	offset)
{
	mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
	list_create(&sl->sl_list, size, offset);
	sl->sl_count = 0;
	sl->sl_waiting = B_FALSE;
}

/*
 * smb_slist_destructor
 *
 * Synchronized list destructor.
 */
void
smb_slist_destructor(
    smb_slist_t	*sl)
{
	VERIFY(sl->sl_count == 0);

	mutex_destroy(&sl->sl_mutex);
	cv_destroy(&sl->sl_cv);
	list_destroy(&sl->sl_list);
}

/*
 * smb_slist_insert_head
 *
 * This function inserts the object passed at the beginning of the list.
 */
void
smb_slist_insert_head(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_insert_head(&sl->sl_list, obj);
	++sl->sl_count;
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_insert_tail
 *
 * This function appends the object passed to the list.
 */
void
smb_slist_insert_tail(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_insert_tail(&sl->sl_list, obj);
	++sl->sl_count;
	mutex_exit(&sl->sl_mutex);
}
/*
 * smb_slist_remove
 *
 * This function removes the object passed by the caller from the list.
 */
void
smb_slist_remove(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_remove(&sl->sl_list, obj);
	if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_move_tail
 *
 * This function transfers all the contents of the synchronized list to the
 * list_t provided. It returns the number of objects transferred.
 */
uint32_t
smb_slist_move_tail(
    list_t	*lst,
    smb_slist_t	*sl)
{
	uint32_t	rv;

	mutex_enter(&sl->sl_mutex);
	rv = sl->sl_count;
	if (sl->sl_count) {
		list_move_tail(lst, &sl->sl_list);
		sl->sl_count = 0;
		if (sl->sl_waiting) {
			sl->sl_waiting = B_FALSE;
			cv_broadcast(&sl->sl_cv);
		}
	}
	mutex_exit(&sl->sl_mutex);
	return (rv);
}

/*
 * smb_slist_obj_move
 *
 * This function moves an object from one list to the end of the other list.
 * It assumes the mutex of each list has been entered.
 */
void
smb_slist_obj_move(
    smb_slist_t	*dst,
    smb_slist_t	*src,
    void	*obj)
{
	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);

	list_remove(&src->sl_list, obj);
	list_insert_tail(&dst->sl_list, obj);
	dst->sl_count++;
	src->sl_count--;
	if ((src->sl_count == 0) && (src->sl_waiting)) {
		src->sl_waiting = B_FALSE;
		cv_broadcast(&src->sl_cv);
	}
}

/*
 * smb_slist_wait_for_empty
 *
 * This function waits for a list to be emptied.
 */
void
smb_slist_wait_for_empty(
    smb_slist_t	*sl)
{
	mutex_enter(&sl->sl_mutex);
	while (sl->sl_count) {
		sl->sl_waiting = B_TRUE;
		cv_wait(&sl->sl_cv, &sl->sl_mutex);
	}
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_exit
 *
 * This function exits the mutex of the list and signals the condition
 * variable if the list is empty.
 */
void
smb_slist_exit(smb_slist_t *sl)
{
	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}
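/*
 * Illustrative sketch (not part of the original source): the basic
 * producer/teardown pattern for a synchronized list.  The last
 * smb_slist_remove() to empty the list broadcasts sl_cv, which is what
 * smb_slist_wait_for_empty() blocks on.
 *
 *	smb_slist_insert_tail(&sl, obj);	producer
 *	...
 *	smb_slist_remove(&sl, obj);		consumer
 *	...
 *	smb_slist_wait_for_empty(&sl);		teardown: blocks until empty
 *	smb_slist_destructor(&sl);
 */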
/*
 * smb_thread_entry_point
 *
 * Common entry point for all the threads created through smb_thread_start.
 * The state of the thread is set to "running" at the beginning and moved to
 * "exiting" just before calling thread_exit(). The condition variable is
 * also signaled.
 */
static void
smb_thread_entry_point(
    smb_thread_t	*thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
	mutex_enter(&thread->sth_mtx);
	ASSERT(thread->sth_state == SMB_THREAD_STATE_STARTING);
	thread->sth_th = curthread;
	thread->sth_did = thread->sth_th->t_did;

	if (!thread->sth_kill) {
		thread->sth_state = SMB_THREAD_STATE_RUNNING;
		cv_signal(&thread->sth_cv);
		mutex_exit(&thread->sth_mtx);
		thread->sth_ep(thread, thread->sth_ep_arg);
		mutex_enter(&thread->sth_mtx);
	}
	thread->sth_th = NULL;
	thread->sth_state = SMB_THREAD_STATE_EXITING;
	cv_broadcast(&thread->sth_cv);
	mutex_exit(&thread->sth_mtx);
	thread_exit();
}

/*
 * smb_thread_init
 */
void
smb_thread_init(
    smb_thread_t	*thread,
    char		*name,
    smb_thread_ep_t	ep,
    void		*ep_arg)
{
	ASSERT(thread->sth_magic != SMB_THREAD_MAGIC);

	bzero(thread, sizeof (*thread));

	(void) strlcpy(thread->sth_name, name, sizeof (thread->sth_name));
	thread->sth_ep = ep;
	thread->sth_ep_arg = ep_arg;
	thread->sth_state = SMB_THREAD_STATE_EXITED;
	mutex_init(&thread->sth_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&thread->sth_cv, NULL, CV_DEFAULT, NULL);
	thread->sth_magic = SMB_THREAD_MAGIC;
}

/*
 * smb_thread_destroy
 */
void
smb_thread_destroy(
    smb_thread_t	*thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
	ASSERT(thread->sth_state == SMB_THREAD_STATE_EXITED);
	thread->sth_magic = 0;
	mutex_destroy(&thread->sth_mtx);
	cv_destroy(&thread->sth_cv);
}

/*
 * smb_thread_start
 *
 * This function starts a thread with the parameters provided. It waits until
 * the state of the thread has been moved to running.
 */
/*ARGSUSED*/
int
smb_thread_start(
    smb_thread_t	*thread)
{
	int		rc = 0;
	kthread_t	*tmpthread;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	switch (thread->sth_state) {
	case SMB_THREAD_STATE_EXITED:
		thread->sth_state = SMB_THREAD_STATE_STARTING;
		mutex_exit(&thread->sth_mtx);
		tmpthread = thread_create(NULL, 0, smb_thread_entry_point,
		    thread, 0, &p0, TS_RUN, minclsyspri);
		ASSERT(tmpthread != NULL);
		mutex_enter(&thread->sth_mtx);
		while (thread->sth_state == SMB_THREAD_STATE_STARTING)
			cv_wait(&thread->sth_cv, &thread->sth_mtx);
		if (thread->sth_state != SMB_THREAD_STATE_RUNNING)
			rc = -1;
		break;
	default:
		ASSERT(0);
		rc = -1;
		break;
	}
	mutex_exit(&thread->sth_mtx);
	return (rc);
}
/*
 * smb_thread_stop
 *
 * This function signals a thread to kill itself and waits until the "exiting"
 * state has been reached.
 */
void
smb_thread_stop(smb_thread_t *thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	switch (thread->sth_state) {
	case SMB_THREAD_STATE_RUNNING:
	case SMB_THREAD_STATE_STARTING:
		if (!thread->sth_kill) {
			thread->sth_kill = B_TRUE;
			cv_broadcast(&thread->sth_cv);
			while (thread->sth_state != SMB_THREAD_STATE_EXITING)
				cv_wait(&thread->sth_cv, &thread->sth_mtx);
			mutex_exit(&thread->sth_mtx);
			thread_join(thread->sth_did);
			mutex_enter(&thread->sth_mtx);
			thread->sth_state = SMB_THREAD_STATE_EXITED;
			thread->sth_did = 0;
			thread->sth_kill = B_FALSE;
			cv_broadcast(&thread->sth_cv);
			break;
		}
		/*FALLTHRU*/

	case SMB_THREAD_STATE_EXITING:
		if (thread->sth_kill) {
			while (thread->sth_state != SMB_THREAD_STATE_EXITED)
				cv_wait(&thread->sth_cv, &thread->sth_mtx);
		} else {
			thread->sth_state = SMB_THREAD_STATE_EXITED;
			thread->sth_did = 0;
		}
		break;

	case SMB_THREAD_STATE_EXITED:
		break;

	default:
		ASSERT(0);
		break;
	}
	mutex_exit(&thread->sth_mtx);
}

/*
 * smb_thread_signal
 *
 * This function signals a thread.
 */
void
smb_thread_signal(smb_thread_t *thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	switch (thread->sth_state) {
	case SMB_THREAD_STATE_RUNNING:
		cv_signal(&thread->sth_cv);
		break;

	default:
		break;
	}
	mutex_exit(&thread->sth_mtx);
}

boolean_t
smb_thread_continue(smb_thread_t *thread)
{
	boolean_t result;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	result = smb_thread_continue_timedwait_locked(thread, 0);
	mutex_exit(&thread->sth_mtx);

	return (result);
}

boolean_t
smb_thread_continue_nowait(smb_thread_t *thread)
{
	boolean_t result;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	/*
	 * Setting ticks=-1 requests a non-blocking check.  We will
	 * still block if the thread is in "suspend" state.
	 */
	result = smb_thread_continue_timedwait_locked(thread, -1);
	mutex_exit(&thread->sth_mtx);

	return (result);
}

boolean_t
smb_thread_continue_timedwait(smb_thread_t *thread, int seconds)
{
	boolean_t result;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	result = smb_thread_continue_timedwait_locked(thread,
	    SEC_TO_TICK(seconds));
	mutex_exit(&thread->sth_mtx);

	return (result);
}
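/*
 * Illustrative sketch (not part of the original source): the intended
 * lifecycle of an smb_thread_t.  The worker loops on smb_thread_continue(),
 * which returns B_FALSE once smb_thread_stop() sets sth_kill.
 *
 *	static void
 *	my_worker(smb_thread_t *t, void *arg)	hypothetical entry point
 *	{
 *		while (smb_thread_continue(t)) {
 *			... do one unit of work ...
 *		}
 *	}
 *
 *	smb_thread_init(&t, "my_worker", my_worker, NULL);
 *	(void) smb_thread_start(&t);
 *	smb_thread_signal(&t);			wake the worker
 *	smb_thread_stop(&t);
 *	smb_thread_destroy(&t);
 */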
/*
 * smb_thread_continue_timedwait_locked
 *
 * Internal only.  ticks == -1 means don't block; ticks == 0 means wait
 * indefinitely.
 */
static boolean_t
smb_thread_continue_timedwait_locked(smb_thread_t *thread, int ticks)
{
	boolean_t	result;

	/* -1 means don't block */
	if (ticks != -1 && !thread->sth_kill) {
		if (ticks == 0) {
			cv_wait(&thread->sth_cv, &thread->sth_mtx);
		} else {
			(void) cv_reltimedwait(&thread->sth_cv,
			    &thread->sth_mtx, (clock_t)ticks, TR_CLOCK_TICK);
		}
	}
	result = (thread->sth_kill == 0);

	return (result);
}

/*
 * smb_rwx_init
 */
void
smb_rwx_init(
    smb_rwx_t	*rwx)
{
	bzero(rwx, sizeof (smb_rwx_t));
	cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
}

/*
 * smb_rwx_destroy
 */
void
smb_rwx_destroy(
    smb_rwx_t	*rwx)
{
	mutex_destroy(&rwx->rwx_mutex);
	cv_destroy(&rwx->rwx_cv);
	rw_destroy(&rwx->rwx_lock);
}

/*
 * smb_rwx_rwexit
 */
void
smb_rwx_rwexit(
    smb_rwx_t	*rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mutex_enter(&rwx->rwx_mutex);
		if (rwx->rwx_waiting) {
			rwx->rwx_waiting = B_FALSE;
			cv_broadcast(&rwx->rwx_cv);
		}
		mutex_exit(&rwx->rwx_mutex);
	}
	rw_exit(&rwx->rwx_lock);
}

/*
 * smb_rwx_rwupgrade
 */
krw_t
smb_rwx_rwupgrade(
    smb_rwx_t	*rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		return (RW_WRITER);
	}
	if (!rw_tryupgrade(&rwx->rwx_lock)) {
		rw_exit(&rwx->rwx_lock);
		rw_enter(&rwx->rwx_lock, RW_WRITER);
	}
	return (RW_READER);
}

/*
 * smb_rwx_rwdowngrade
 */
void
smb_rwx_rwdowngrade(
    smb_rwx_t	*rwx,
    krw_t	mode)
{
	ASSERT(rw_write_held(&rwx->rwx_lock));
	ASSERT(rw_owner(&rwx->rwx_lock) == curthread);

	if (mode == RW_WRITER) {
		return;
	}
	ASSERT(mode == RW_READER);
	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		rwx->rwx_waiting = B_FALSE;
		cv_broadcast(&rwx->rwx_cv);
	}
	mutex_exit(&rwx->rwx_mutex);
	rw_downgrade(&rwx->rwx_lock);
}
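/*
 * Illustrative sketch (not part of the original source): the
 * upgrade/downgrade pairing.  The mode returned by smb_rwx_rwupgrade()
 * must be passed back to smb_rwx_rwdowngrade() so that a lock originally
 * held as RW_WRITER stays a writer lock.  smb_rwx_rwenter() is assumed to
 * be the usual entry macro/helper from smb_ktypes.h.
 *
 *	krw_t mode;
 *
 *	smb_rwx_rwenter(&rwx, RW_READER);
 *	...
 *	mode = smb_rwx_rwupgrade(&rwx);
 *	... modify the protected state ...
 *	smb_rwx_rwdowngrade(&rwx, mode);
 *	...
 *	smb_rwx_rwexit(&rwx);
 */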
/*
 * smb_rwx_rwwait
 *
 * This function assumes the smb_rwx lock was entered in RW_READER or
 * RW_WRITER mode. It will:
 *
 *	1) release the lock and save its current mode.
 *	2) wait until the condition variable is signaled. This can happen
 *	   for 2 reasons: when a writer releases the lock or when the
 *	   timeout (if provided) expires.
 *	3) re-acquire the lock in the mode saved in (1).
 */
int
smb_rwx_rwwait(
    smb_rwx_t	*rwx,
    clock_t	timeout)
{
	int	rc = 1;	/* if no wait is needed, report "signaled" */
	krw_t	mode;

	mutex_enter(&rwx->rwx_mutex);
	rwx->rwx_waiting = B_TRUE;
	mutex_exit(&rwx->rwx_mutex);

	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mode = RW_WRITER;
	} else {
		ASSERT(rw_read_held(&rwx->rwx_lock));
		mode = RW_READER;
	}
	rw_exit(&rwx->rwx_lock);

	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		if (timeout == -1) {
			rc = 1;
			cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
		} else {
			rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
			    timeout, TR_CLOCK_TICK);
		}
	}
	mutex_exit(&rwx->rwx_mutex);

	rw_enter(&rwx->rwx_lock, mode);
	return (rc);
}

/*
 * SMB ID mapping
 *
 * Solaris ID mapping service (aka Winchester) works with domain SIDs
 * and RIDs where domain SIDs are in string format. CIFS service works
 * with binary SIDs understandable by CIFS clients. A layer of SMB ID
 * mapping functions is implemented to hide the SID conversion details
 * and also to hide the handling of arrays of batch mapping requests.
 *
 * IMPORTANT NOTE: The Winchester API requires a zone. Because the CIFS
 * server currently only runs in the global zone, the global zone is
 * specified. This needs to be fixed when the CIFS server supports zones.
 */

static int smb_idmap_batch_binsid(smb_idmap_batch_t *sib);

/*
 * smb_idmap_getid
 *
 * Maps the given Windows SID to a Solaris ID using the
 * simple mapping API.
 */
idmap_stat
smb_idmap_getid(smb_sid_t *sid, uid_t *id, int *idtype)
{
	smb_idmap_t	sim;
	char		sidstr[SMB_SID_STRSZ];

	smb_sid_tostr(sid, sidstr);
	if (smb_sid_splitstr(sidstr, &sim.sim_rid) != 0)
		return (IDMAP_ERR_SID);
	sim.sim_domsid = sidstr;
	sim.sim_id = id;
	/* preserve the requested type when the lookup doesn't set it */
	sim.sim_idtype = *idtype;

	switch (*idtype) {
	case SMB_IDMAP_USER:
		sim.sim_stat = kidmap_getuidbysid(global_zone, sim.sim_domsid,
		    sim.sim_rid, sim.sim_id);
		break;

	case SMB_IDMAP_GROUP:
		sim.sim_stat = kidmap_getgidbysid(global_zone, sim.sim_domsid,
		    sim.sim_rid, sim.sim_id);
		break;

	case SMB_IDMAP_UNKNOWN:
		sim.sim_stat = kidmap_getpidbysid(global_zone, sim.sim_domsid,
		    sim.sim_rid, sim.sim_id, &sim.sim_idtype);
		break;

	default:
		ASSERT(0);
		return (IDMAP_ERR_ARG);
	}

	*idtype = sim.sim_idtype;

	return (sim.sim_stat);
}
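/*
 * Illustrative sketch (not part of the original source): mapping a SID
 * of unknown type and dispatching on the resolved type.
 *
 *	uid_t	id;
 *	int	idtype = SMB_IDMAP_UNKNOWN;
 *
 *	if (smb_idmap_getid(sid, &id, &idtype) == IDMAP_SUCCESS) {
 *		if (idtype == SMB_IDMAP_USER)
 *			... id is a UID ...
 *		else
 *			... id is a GID ...
 *	}
 */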
/*
 * smb_idmap_getsid
 *
 * Maps the given Solaris ID to a Windows SID using the
 * simple mapping API.
 */
idmap_stat
smb_idmap_getsid(uid_t id, int idtype, smb_sid_t **sid)
{
	smb_idmap_t	sim;

	switch (idtype) {
	case SMB_IDMAP_USER:
		sim.sim_stat = kidmap_getsidbyuid(global_zone, id,
		    (const char **)&sim.sim_domsid, &sim.sim_rid);
		break;

	case SMB_IDMAP_GROUP:
		sim.sim_stat = kidmap_getsidbygid(global_zone, id,
		    (const char **)&sim.sim_domsid, &sim.sim_rid);
		break;

	case SMB_IDMAP_EVERYONE:
		/* Everyone S-1-1-0 */
		sim.sim_domsid = "S-1-1";
		sim.sim_rid = 0;
		sim.sim_stat = IDMAP_SUCCESS;
		break;

	default:
		ASSERT(0);
		return (IDMAP_ERR_ARG);
	}

	if (sim.sim_stat != IDMAP_SUCCESS)
		return (sim.sim_stat);

	if (sim.sim_domsid == NULL)
		return (IDMAP_ERR_NOMAPPING);

	sim.sim_sid = smb_sid_fromstr(sim.sim_domsid);
	if (sim.sim_sid == NULL)
		return (IDMAP_ERR_INTERNAL);

	*sid = smb_sid_splice(sim.sim_sid, sim.sim_rid);
	smb_sid_free(sim.sim_sid);
	if (*sid == NULL)
		sim.sim_stat = IDMAP_ERR_INTERNAL;

	return (sim.sim_stat);
}

/*
 * smb_idmap_batch_create
 *
 * Creates and initializes the context for batch ID mapping.
 */
idmap_stat
smb_idmap_batch_create(smb_idmap_batch_t *sib, uint16_t nmap, int flags)
{
	ASSERT(sib);

	bzero(sib, sizeof (smb_idmap_batch_t));

	sib->sib_idmaph = kidmap_get_create(global_zone);

	sib->sib_flags = flags;
	sib->sib_nmap = nmap;
	sib->sib_size = nmap * sizeof (smb_idmap_t);
	sib->sib_maps = kmem_zalloc(sib->sib_size, KM_SLEEP);

	return (IDMAP_SUCCESS);
}

/*
 * smb_idmap_batch_destroy
 *
 * Frees the batch ID mapping context.
 * If the ID mapping is Solaris -> Windows it frees the memory
 * allocated for binary SIDs.
 */
void
smb_idmap_batch_destroy(smb_idmap_batch_t *sib)
{
	char	*domsid;
	int	i;

	ASSERT(sib);
	ASSERT(sib->sib_maps);

	if (sib->sib_idmaph)
		kidmap_get_destroy(sib->sib_idmaph);

	if (sib->sib_flags & SMB_IDMAP_ID2SID) {
		/*
		 * SIDs are allocated only when mapping
		 * UID/GID to SIDs
		 */
		for (i = 0; i < sib->sib_nmap; i++)
			smb_sid_free(sib->sib_maps[i].sim_sid);
	} else if (sib->sib_flags & SMB_IDMAP_SID2ID) {
		/*
		 * SID prefixes are allocated only when mapping
		 * SIDs to UID/GID
		 */
		for (i = 0; i < sib->sib_nmap; i++) {
			domsid = sib->sib_maps[i].sim_domsid;
			if (domsid)
				smb_mem_free(domsid);
		}
	}

	if (sib->sib_size && sib->sib_maps)
		kmem_free(sib->sib_maps, sib->sib_size);
}
/*
 * smb_idmap_batch_getid
 *
 * Queue a request to map the given SID to a UID or GID.
 *
 * sim->sim_id should point to the variable that is to hold the returned
 * UID/GID.  This needs to be set up by the caller of this function.
 *
 * If the requested ID type is known, it's passed as 'idtype';
 * if it's unknown it'll be returned in sim->sim_idtype.
 */
idmap_stat
smb_idmap_batch_getid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
    smb_sid_t *sid, int idtype)
{
	char		strsid[SMB_SID_STRSZ];
	idmap_stat	idm_stat;

	ASSERT(idmaph);
	ASSERT(sim);
	ASSERT(sid);

	smb_sid_tostr(sid, strsid);
	if (smb_sid_splitstr(strsid, &sim->sim_rid) != 0)
		return (IDMAP_ERR_SID);
	sim->sim_domsid = smb_mem_strdup(strsid);

	switch (idtype) {
	case SMB_IDMAP_USER:
		idm_stat = kidmap_batch_getuidbysid(idmaph, sim->sim_domsid,
		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
		break;

	case SMB_IDMAP_GROUP:
		idm_stat = kidmap_batch_getgidbysid(idmaph, sim->sim_domsid,
		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
		break;

	case SMB_IDMAP_UNKNOWN:
		idm_stat = kidmap_batch_getpidbysid(idmaph, sim->sim_domsid,
		    sim->sim_rid, sim->sim_id, &sim->sim_idtype,
		    &sim->sim_stat);
		break;

	default:
		ASSERT(0);
		return (IDMAP_ERR_ARG);
	}

	return (idm_stat);
}

/*
 * smb_idmap_batch_getsid
 *
 * Queue a request to map the given UID/GID to a SID.
 *
 * sim->sim_domsid and sim->sim_rid will contain the mapping
 * result upon successful processing of the batched request.
 */
idmap_stat
smb_idmap_batch_getsid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
    uid_t id, int idtype)
{
	idmap_stat	idm_stat;

	switch (idtype) {
	case SMB_IDMAP_USER:
		idm_stat = kidmap_batch_getsidbyuid(idmaph, id,
		    (const char **)&sim->sim_domsid, &sim->sim_rid,
		    &sim->sim_stat);
		break;

	case SMB_IDMAP_GROUP:
		idm_stat = kidmap_batch_getsidbygid(idmaph, id,
		    (const char **)&sim->sim_domsid, &sim->sim_rid,
		    &sim->sim_stat);
		break;

	case SMB_IDMAP_OWNERAT:
		/* Current Owner S-1-5-32-766 */
		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
		sim->sim_rid = SECURITY_CURRENT_OWNER_RID;
		sim->sim_stat = IDMAP_SUCCESS;
		idm_stat = IDMAP_SUCCESS;
		break;

	case SMB_IDMAP_GROUPAT:
		/* Current Group S-1-5-32-767 */
		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
		sim->sim_rid = SECURITY_CURRENT_GROUP_RID;
		sim->sim_stat = IDMAP_SUCCESS;
		idm_stat = IDMAP_SUCCESS;
		break;

	case SMB_IDMAP_EVERYONE:
		/* Everyone S-1-1-0 */
		sim->sim_domsid = NT_WORLD_AUTH_SIDSTR;
		sim->sim_rid = 0;
		sim->sim_stat = IDMAP_SUCCESS;
		idm_stat = IDMAP_SUCCESS;
		break;

	default:
		ASSERT(0);
		return (IDMAP_ERR_ARG);
	}

	return (idm_stat);
}

/*
 * smb_idmap_batch_binsid
 *
 * Convert (domsid, rid) pairs to binary SIDs.
 *
 * Returns 0 if successful and non-zero upon failure.
 */
static int
smb_idmap_batch_binsid(smb_idmap_batch_t *sib)
{
	smb_sid_t	*sid;
	smb_idmap_t	*sim;
	int		i;

	if (sib->sib_flags & SMB_IDMAP_SID2ID)
		/* This operation is not required */
		return (0);

	sim = sib->sib_maps;
	for (i = 0; i < sib->sib_nmap; sim++, i++) {
		ASSERT(sim->sim_domsid);
		if (sim->sim_domsid == NULL)
			return (1);

		if ((sid = smb_sid_fromstr(sim->sim_domsid)) == NULL)
			return (1);

		sim->sim_sid = smb_sid_splice(sid, sim->sim_rid);
		smb_sid_free(sid);
	}

	return (0);
}
/*
 * smb_idmap_batch_getmappings
 *
 * Triggers the ID mapping service to get the mappings for the queued
 * requests.
 *
 * Checks the result of all the queued requests.
 * If this is a Solaris -> Windows mapping it generates
 * binary SIDs from the returned (domsid, rid) pairs.
 */
idmap_stat
smb_idmap_batch_getmappings(smb_idmap_batch_t *sib)
{
	idmap_stat	idm_stat = IDMAP_SUCCESS;
	int		i;

	idm_stat = kidmap_get_mappings(sib->sib_idmaph);
	if (idm_stat != IDMAP_SUCCESS)
		return (idm_stat);

	/*
	 * Check the status for all the queued requests
	 */
	for (i = 0; i < sib->sib_nmap; i++) {
		if (sib->sib_maps[i].sim_stat != IDMAP_SUCCESS)
			return (sib->sib_maps[i].sim_stat);
	}

	if (smb_idmap_batch_binsid(sib) != 0)
		idm_stat = IDMAP_ERR_OTHER;

	return (idm_stat);
}
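/*
 * Illustrative sketch (not part of the original source): the intended
 * batch workflow for mapping several UIDs to SIDs.  The nmap/ids[] names
 * are hypothetical.
 *
 *	smb_idmap_batch_t sib;
 *	int i;
 *
 *	if (smb_idmap_batch_create(&sib, nmap, SMB_IDMAP_ID2SID)
 *	    != IDMAP_SUCCESS)
 *		return;
 *	for (i = 0; i < nmap; i++)
 *		(void) smb_idmap_batch_getsid(sib.sib_idmaph,
 *		    &sib.sib_maps[i], ids[i], SMB_IDMAP_USER);
 *	if (smb_idmap_batch_getmappings(&sib) == IDMAP_SUCCESS)
 *		... sib.sib_maps[i].sim_sid now holds binary SIDs ...
 *	smb_idmap_batch_destroy(&sib);
 */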
uint64_t
smb_time_unix_to_nt(timestruc_t *unix_time)
{
	uint64_t nt_time;

	if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
		return (0);

	nt_time = unix_time->tv_sec;
	nt_time *= 10000000;  /* seconds to 100ns */
	nt_time += unix_time->tv_nsec / 100;
	return (nt_time + NT_TIME_BIAS);
}

void
smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
{
	uint32_t seconds;

	ASSERT(unix_time);

	if ((nt_time == 0) || (nt_time == -1)) {
		unix_time->tv_sec = 0;
		unix_time->tv_nsec = 0;
		return;
	}

	nt_time -= NT_TIME_BIAS;
	seconds = nt_time / 10000000;
	unix_time->tv_sec = seconds;
	unix_time->tv_nsec = (nt_time % 10000000) * 100;
}

/*
 * smb_time_gmt_to_local, smb_time_local_to_gmt
 *
 * Apply the gmt offset to convert between local time and gmt
 */
int32_t
smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt)
{
	if ((gmt == 0) || (gmt == -1))
		return (0);

	return (gmt - sr->sr_gmtoff);
}

int32_t
smb_time_local_to_gmt(smb_request_t *sr, int32_t local)
{
	if ((local == 0) || (local == -1))
		return (0);

	return (local + sr->sr_gmtoff);
}


/*
 * smb_time_dos_to_unix
 *
 * Convert SMB_DATE & SMB_TIME values to a unix timestamp.
 *
 * A date/time field of 0 means that the value assigned by the server
 * file system need not be changed. The behaviour when the date/time
 * field is set to -1 is not documented but it is generally treated
 * like 0.
 * If date or time is 0 or -1 the unix time is returned as 0
 * so that the caller can identify and handle this special case.
 */
int32_t
smb_time_dos_to_unix(int16_t date, int16_t time)
{
	struct tm	atm;

	if (((date == 0) || (time == 0)) ||
	    ((date == -1) || (time == -1))) {
		return (0);
	}

	atm.tm_year = ((date >> 9) & 0x3F) + 80;
	atm.tm_mon = ((date >> 5) & 0x0F) - 1;
	atm.tm_mday = ((date >> 0) & 0x1F);
	atm.tm_hour = ((time >> 11) & 0x1F);
	atm.tm_min = ((time >> 5) & 0x3F);
	atm.tm_sec = ((time >> 0) & 0x1F) << 1;

	return (smb_timegm(&atm));
}

void
smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p)
{
	struct tm	atm;
	int		i;
	time_t		tmp_time;

	if (ux_time == 0) {
		*date_p = 0;
		*time_p = 0;
		return;
	}

	tmp_time = (time_t)ux_time;
	(void) smb_gmtime_r(&tmp_time, &atm);

	if (date_p) {
		i = 0;
		i += atm.tm_year - 80;
		i <<= 4;
		i += atm.tm_mon + 1;
		i <<= 5;
		i += atm.tm_mday;

		*date_p = (short)i;
	}
	if (time_p) {
		i = 0;
		i += atm.tm_hour;
		i <<= 6;
		i += atm.tm_min;
		i <<= 5;
		i += atm.tm_sec >> 1;

		*time_p = (short)i;
	}
}


/*
 * smb_gmtime_r
 *
 * Thread-safe version of smb_gmtime. Returns a null pointer if either
 * input parameter is a null pointer. Otherwise returns a pointer
 * to result.
 *
 * Day of the week calculation: the Epoch was a Thursday.
 *
 * There are no timezone corrections so tm_isdst and tm_gmtoff are
 * always zero, and the zone is always WET.
 */
struct tm *
smb_gmtime_r(time_t *clock, struct tm *result)
{
	time_t tsec;
	int year;
	int month;
	int sec_per_month;

	if (clock == 0 || result == 0)
		return (0);

	bzero(result, sizeof (struct tm));
	tsec = *clock;
	tsec -= tzh_leapcnt;

	result->tm_wday = tsec / SECSPERDAY;
	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;

	year = EPOCH_YEAR;
	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
	    (SECSPERDAY * DAYSPERNYEAR))) {
		if (isleap(year))
			tsec -= SECSPERDAY * DAYSPERLYEAR;
		else
			tsec -= SECSPERDAY * DAYSPERNYEAR;

		++year;
	}

	result->tm_year = year - TM_YEAR_BASE;
	result->tm_yday = tsec / SECSPERDAY;

	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
		sec_per_month = days_in_month[month] * SECSPERDAY;

		if (month == TM_FEBRUARY && isleap(year))
			sec_per_month += SECSPERDAY;

		if (tsec < sec_per_month)
			break;

		tsec -= sec_per_month;
	}

	result->tm_mon = month;
	result->tm_mday = (tsec / SECSPERDAY) + 1;
	tsec %= SECSPERDAY;
	result->tm_sec = tsec % 60;
	tsec /= 60;
	result->tm_min = tsec % 60;
	tsec /= 60;
	result->tm_hour = (int)tsec;

	return (result);
}
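/*
 * Illustrative worked example (not part of the original source): the DOS
 * date/time packing performed by smb_time_unix_to_dos() above.
 *
 *	2012-06-15 12:30:10 UTC packs as:
 *
 *	date = (2012 - 1980) << 9 | 6 << 5 | 15       = 0x40CF
 *	time = 12 << 11 | 30 << 5 | (10 >> 1)         = 0x63C5
 *
 * Note the 2-second granularity of the seconds field (tm_sec >> 1),
 * which smb_time_dos_to_unix() undoes with the << 1 above.
 */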
/*
 * smb_timegm
 *
 * Converts the broken-down time in tm to a time value, i.e. the number
 * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
 * not a POSIX or ANSI function. Per the man page, the input values of
 * tm_wday and tm_yday are ignored and, as the input data is assumed to
 * represent GMT, we force tm_isdst and tm_gmtoff to 0.
 *
 * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
 * and tm_yday, and bring the other fields within normal range. I don't
 * think this is really how it should be done but it's convenient for
 * now.
 */
time_t
smb_timegm(struct tm *tm)
{
	time_t tsec;
	int dd;
	int mm;
	int yy;
	int year;

	if (tm == 0)
		return (-1);

	year = tm->tm_year + TM_YEAR_BASE;
	tsec = tzh_leapcnt;

	for (yy = EPOCH_YEAR; yy < year; ++yy) {
		if (isleap(yy))
			tsec += SECSPERDAY * DAYSPERLYEAR;
		else
			tsec += SECSPERDAY * DAYSPERNYEAR;
	}

	for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
		dd = days_in_month[mm] * SECSPERDAY;

		if (mm == TM_FEBRUARY && isleap(year))
			dd += SECSPERDAY;

		tsec += dd;
	}

	tsec += (tm->tm_mday - 1) * SECSPERDAY;
	tsec += tm->tm_sec;
	tsec += tm->tm_min * SECSPERMIN;
	tsec += tm->tm_hour * SECSPERHOUR;

	tm->tm_isdst = 0;
	(void) smb_gmtime_r(&tsec, tm);
	return (tsec);
}

/*
 * smb_pad_align
 *
 * Returns the number of bytes required to pad an offset to the
 * specified alignment.
 */
uint32_t
smb_pad_align(uint32_t offset, uint32_t align)
{
	uint32_t pad = offset % align;

	if (pad != 0)
		pad = align - pad;

	return (pad);
}
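/*
 * Illustrative worked example (not part of the original source):
 *
 *	smb_pad_align(0, 4)  == 0	already aligned
 *	smb_pad_align(5, 4)  == 3	5 + 3 = 8, the next multiple of 4
 *	smb_pad_align(17, 8) == 7	17 + 7 = 24
 */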
/*
 * smb_panic
 *
 * Logs the file name, function name and line number passed in and panics the
 * system.
 */
void
smb_panic(char *file, const char *func, int line)
{
	cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
}

/*
 * Creates an AVL tree and initializes the given smb_avl_t
 * structure using the passed args
 */
void
smb_avl_create(smb_avl_t *avl, size_t size, size_t offset, smb_avl_nops_t *ops)
{
	ASSERT(avl);
	ASSERT(ops);

	rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL);

	avl->avl_nops = ops;
	avl->avl_state = SMB_AVL_STATE_READY;
	avl->avl_refcnt = 0;
	(void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence,
	    sizeof (uint32_t));

	avl_create(&avl->avl_tree, ops->avln_cmp, size, offset);
}

/*
 * Destroys the specified AVL tree.
 * It waits for all the in-flight operations to finish
 * before destroying the AVL.
 */
void
smb_avl_destroy(smb_avl_t *avl)
{
	void *cookie = NULL;
	void *node;

	ASSERT(avl);

	mutex_enter(&avl->avl_mutex);
	if (avl->avl_state != SMB_AVL_STATE_READY) {
		mutex_exit(&avl->avl_mutex);
		return;
	}

	avl->avl_state = SMB_AVL_STATE_DESTROYING;

	while (avl->avl_refcnt > 0)
		(void) cv_wait(&avl->avl_cv, &avl->avl_mutex);
	mutex_exit(&avl->avl_mutex);

	rw_enter(&avl->avl_lock, RW_WRITER);
	while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL)
		avl->avl_nops->avln_destroy(node);

	avl_destroy(&avl->avl_tree);
	rw_exit(&avl->avl_lock);

	rw_destroy(&avl->avl_lock);

	mutex_destroy(&avl->avl_mutex);
	bzero(avl, sizeof (smb_avl_t));
}

/*
 * Adds the given item to the AVL if it's
 * not already there.
 *
 * Returns:
 *
 *	ENOTACTIVE	AVL is not in READY state
 *	EEXIST		The item is already in AVL
 */
int
smb_avl_add(smb_avl_t *avl, void *item)
{
	avl_index_t where;

	ASSERT(avl);
	ASSERT(item);

	if (!smb_avl_hold(avl))
		return (ENOTACTIVE);

	rw_enter(&avl->avl_lock, RW_WRITER);
	if (avl_find(&avl->avl_tree, item, &where) != NULL) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return (EEXIST);
	}

	avl_insert(&avl->avl_tree, item, where);
	avl->avl_sequence++;
	rw_exit(&avl->avl_lock);

	smb_avl_rele(avl);
	return (0);
}

/*
 * Removes the given item from the AVL.
 * If no reference is left on the item
 * it will also be destroyed by calling the
 * registered destroy operation.
 */
void
smb_avl_remove(smb_avl_t *avl, void *item)
{
	avl_index_t where;
	void *rm_item;

	ASSERT(avl);
	ASSERT(item);

	if (!smb_avl_hold(avl))
		return;

	rw_enter(&avl->avl_lock, RW_WRITER);
	if ((rm_item = avl_find(&avl->avl_tree, item, &where)) == NULL) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return;
	}

	avl_remove(&avl->avl_tree, rm_item);
	if (avl->avl_nops->avln_rele(rm_item))
		avl->avl_nops->avln_destroy(rm_item);
	avl->avl_sequence++;
	rw_exit(&avl->avl_lock);

	smb_avl_rele(avl);
}

/*
 * Looks up the AVL for the given item.
 * If the item is found a hold on the object
 * is taken before the pointer to it is
 * returned to the caller. The caller MUST
 * always call smb_avl_release() after it's done
 * using the returned object to release the hold
 * taken on the object.
 */
void *
smb_avl_lookup(smb_avl_t *avl, void *item)
{
	void *node = NULL;

	ASSERT(avl);
	ASSERT(item);

	if (!smb_avl_hold(avl))
		return (NULL);

	rw_enter(&avl->avl_lock, RW_READER);
	node = avl_find(&avl->avl_tree, item, NULL);
	if (node != NULL)
		avl->avl_nops->avln_hold(node);
	rw_exit(&avl->avl_lock);

	if (node == NULL)
		smb_avl_rele(avl);

	return (node);
}

/*
 * The hold on the given object is released.
 * This function MUST always be called after
 * smb_avl_lookup() and smb_avl_iterate() for
 * the returned object.
 *
 * If the AVL is in DESTROYING state, the destroying
 * thread will be notified.
 */
void
smb_avl_release(smb_avl_t *avl, void *item)
{
	ASSERT(avl);
	ASSERT(item);

	if (avl->avl_nops->avln_rele(item))
		avl->avl_nops->avln_destroy(item);

	smb_avl_rele(avl);
}
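/*
 * Illustrative sketch (not part of the original source): lookup with the
 * mandatory release.  The returned node carries both an object hold
 * (avln_hold) and an AVL hold; smb_avl_release() drops both.  my_node_t
 * and its n_id key field are hypothetical.
 *
 *	my_node_t key, *node;
 *
 *	key.n_id = id;
 *	node = smb_avl_lookup(&avl, &key);
 *	if (node != NULL) {
 *		... use node ...
 *		smb_avl_release(&avl, node);
 *	}
 */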
/*
 * Initializes the given cursor for the AVL.
 * The cursor will be used to iterate through the AVL
 */
void
smb_avl_iterinit(smb_avl_t *avl, smb_avl_cursor_t *cursor)
{
	ASSERT(avl);
	ASSERT(cursor);

	cursor->avlc_next = NULL;
	cursor->avlc_sequence = avl->avl_sequence;
}

/*
 * Iterates through the AVL using the given cursor.
 * It always starts at the beginning and then returns
 * a pointer to the next object on each subsequent call.
 *
 * If a new object is added to or removed from the AVL
 * between two calls to this function, the iteration
 * will terminate prematurely.
 *
 * The caller MUST always call smb_avl_release() after it's
 * done using the returned object to release the hold taken
 * on the object.
 */
void *
smb_avl_iterate(smb_avl_t *avl, smb_avl_cursor_t *cursor)
{
	void *node;

	ASSERT(avl);
	ASSERT(cursor);

	if (!smb_avl_hold(avl))
		return (NULL);

	rw_enter(&avl->avl_lock, RW_READER);
	if (cursor->avlc_sequence != avl->avl_sequence) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return (NULL);
	}

	if (cursor->avlc_next == NULL)
		node = avl_first(&avl->avl_tree);
	else
		node = AVL_NEXT(&avl->avl_tree, cursor->avlc_next);

	if (node != NULL)
		avl->avl_nops->avln_hold(node);

	cursor->avlc_next = node;
	rw_exit(&avl->avl_lock);

	if (node == NULL)
		smb_avl_rele(avl);

	return (node);
}
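/*
 * Illustrative sketch (not part of the original source): a full
 * iteration.  Each returned node must be released, and the loop ends
 * early if the AVL is modified (cursor sequence mismatch).
 *
 *	smb_avl_cursor_t cursor;
 *	void *node;
 *
 *	smb_avl_iterinit(&avl, &cursor);
 *	while ((node = smb_avl_iterate(&avl, &cursor)) != NULL) {
 *		... examine node ...
 *		smb_avl_release(&avl, node);
 *	}
 */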
/*
 * Increments the AVL reference count in order to
 * prevent the avl from being destroyed while it's
 * being accessed.
 */
static boolean_t
smb_avl_hold(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	if (avl->avl_state != SMB_AVL_STATE_READY) {
		mutex_exit(&avl->avl_mutex);
		return (B_FALSE);
	}
	avl->avl_refcnt++;
	mutex_exit(&avl->avl_mutex);

	return (B_TRUE);
}

/*
 * Decrements the AVL reference count to release the
 * hold. If another thread is trying to destroy the
 * AVL and is waiting for the reference count to become
 * 0, it is signaled to wake up.
 */
static void
smb_avl_rele(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	ASSERT(avl->avl_refcnt > 0);
	avl->avl_refcnt--;
	if (avl->avl_state == SMB_AVL_STATE_DESTROYING)
		cv_broadcast(&avl->avl_cv);
	mutex_exit(&avl->avl_mutex);
}

/*
 * smb_latency_init
 */
void
smb_latency_init(smb_latency_t *lat)
{
	bzero(lat, sizeof (*lat));
	mutex_init(&lat->ly_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
}

/*
 * smb_latency_destroy
 */
void
smb_latency_destroy(smb_latency_t *lat)
{
	mutex_destroy(&lat->ly_mutex);
}

/*
 * smb_latency_add_sample
 *
 * Uses the new sample to calculate the new mean and standard deviation. The
 * sample must be a scaled value.
 */
void
smb_latency_add_sample(smb_latency_t *lat, hrtime_t sample)
{
	hrtime_t	a_mean;
	hrtime_t	d_mean;

	mutex_enter(&lat->ly_mutex);
	lat->ly_a_nreq++;
	lat->ly_a_sum += sample;
	if (lat->ly_a_nreq != 0) {
		a_mean = lat->ly_a_sum / lat->ly_a_nreq;
		lat->ly_a_stddev =
		    (sample - a_mean) * (sample - lat->ly_a_mean);
		lat->ly_a_mean = a_mean;
	}
	lat->ly_d_nreq++;
	lat->ly_d_sum += sample;
	if (lat->ly_d_nreq != 0) {
		d_mean = lat->ly_d_sum / lat->ly_d_nreq;
		lat->ly_d_stddev =
		    (sample - d_mean) * (sample - lat->ly_d_mean);
		lat->ly_d_mean = d_mean;
	}
	mutex_exit(&lat->ly_mutex);
}

/*
 * smb_srqueue_init
 */
void
smb_srqueue_init(smb_srqueue_t *srq)
{
	bzero(srq, sizeof (*srq));
	mutex_init(&srq->srq_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
	srq->srq_wlastupdate = srq->srq_rlastupdate = gethrtime_unscaled();
}

/*
 * smb_srqueue_destroy
 */
void
smb_srqueue_destroy(smb_srqueue_t *srq)
{
	mutex_destroy(&srq->srq_mutex);
}

/*
 * smb_srqueue_waitq_enter
 */
void
smb_srqueue_waitq_enter(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	wcnt = srq->srq_wcnt++;
	if (wcnt != 0) {
		srq->srq_wlentime += delta * wcnt;
		srq->srq_wtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}

/*
 * smb_srqueue_runq_exit
 */
void
smb_srqueue_runq_exit(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	rcnt = srq->srq_rcnt--;
	ASSERT(rcnt > 0);
	srq->srq_rlentime += delta * rcnt;
	srq->srq_rtime += delta;
	mutex_exit(&srq->srq_mutex);
}

/*
 * smb_srqueue_waitq_to_runq
 */
void
smb_srqueue_waitq_to_runq(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	wcnt = srq->srq_wcnt--;
	ASSERT(wcnt > 0);
	srq->srq_wlentime += delta * wcnt;
	srq->srq_wtime += delta;
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	rcnt = srq->srq_rcnt++;
	if (rcnt != 0) {
		srq->srq_rlentime += delta * rcnt;
		srq->srq_rtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}
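/*
 * Illustrative sketch (not part of the original source): how a request
 * is expected to move through the wait and run queues so that the
 * counters above stay consistent.
 *
 *	smb_srqueue_waitq_enter(srq);	request received, queued
 *	...
 *	smb_srqueue_waitq_to_runq(srq);	a worker picks the request up
 *	...
 *	smb_srqueue_runq_exit(srq);	response sent
 */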
/*
 * smb_srqueue_update
 *
 * Takes a snapshot of the smb_srqueue_t counters into the
 * smb_kstat_utilization_t structure passed in.
 */
void
smb_srqueue_update(smb_srqueue_t *srq, smb_kstat_utilization_t *kd)
{
	hrtime_t	delta;
	hrtime_t	snaptime;

	mutex_enter(&srq->srq_mutex);
	snaptime = gethrtime_unscaled();
	delta = snaptime - srq->srq_wlastupdate;
	srq->srq_wlastupdate = snaptime;
	if (srq->srq_wcnt != 0) {
		srq->srq_wlentime += delta * srq->srq_wcnt;
		srq->srq_wtime += delta;
	}
	delta = snaptime - srq->srq_rlastupdate;
	srq->srq_rlastupdate = snaptime;
	if (srq->srq_rcnt != 0) {
		srq->srq_rlentime += delta * srq->srq_rcnt;
		srq->srq_rtime += delta;
	}
	kd->ku_rlentime = srq->srq_rlentime;
	kd->ku_rtime = srq->srq_rtime;
	kd->ku_wlentime = srq->srq_wlentime;
	kd->ku_wtime = srq->srq_wtime;
	mutex_exit(&srq->srq_mutex);
	scalehrtime(&kd->ku_rlentime);
	scalehrtime(&kd->ku_rtime);
	scalehrtime(&kd->ku_wlentime);
	scalehrtime(&kd->ku_wtime);
}

void
smb_threshold_init(smb_cmd_threshold_t *ct, char *cmd, int threshold,
    int timeout)
{
	bzero(ct, sizeof (smb_cmd_threshold_t));
	mutex_init(&ct->ct_mutex, NULL, MUTEX_DEFAULT, NULL);
	ct->ct_cmd = cmd;
	ct->ct_threshold = threshold;
	ct->ct_event = smb_event_create(timeout);
	ct->ct_event_id = smb_event_txid(ct->ct_event);

	if (smb_threshold_debug) {
		cmn_err(CE_NOTE, "smb_threshold_init[%s]: threshold (%d), "
		    "timeout (%d)", cmd, threshold, timeout);
	}
}

/*
 * This function must be called prior to the SMB_SERVER_STATE_STOPPING state
 * so that ct_event can be successfully removed from the event list.
 * It should not be called when the server mutex is held or when the
 * server is removed from the server list.
 */
void
smb_threshold_fini(smb_cmd_threshold_t *ct)
{
	smb_event_destroy(ct->ct_event);
	mutex_destroy(&ct->ct_mutex);
	bzero(ct, sizeof (smb_cmd_threshold_t));
}
/*
 * This threshold mechanism can be used to limit the number of simultaneous
 * requests, which serves to limit the stress that can be applied to the
 * service and also allows the service to respond to requests before the
 * client times out and reports that the server is not responding.
 *
 * If the number of requests exceeds the threshold, new requests will be
 * stalled until the number drops back to the threshold. Stalled requests
 * will be notified as appropriate, in which case 0 will be returned.
 * If the timeout expires before the request is notified, a non-zero errno
 * value will be returned.
 *
 * To avoid a flood of messages, the message rate is throttled as well.
 */
int
smb_threshold_enter(smb_cmd_threshold_t *ct)
{
	int	rc;

	mutex_enter(&ct->ct_mutex);
	if (ct->ct_active_cnt >= ct->ct_threshold && ct->ct_event != NULL) {
		atomic_inc_32(&ct->ct_blocked_cnt);

		if (smb_threshold_debug) {
			cmn_err(CE_NOTE, "smb_threshold_enter[%s]: blocked "
			    "(blocked ops: %u, inflight ops: %u)",
			    ct->ct_cmd, ct->ct_blocked_cnt, ct->ct_active_cnt);
		}

		mutex_exit(&ct->ct_mutex);

		if ((rc = smb_event_wait(ct->ct_event)) != 0) {
			if (rc == ECANCELED)
				return (rc);

			mutex_enter(&ct->ct_mutex);
			if (ct->ct_active_cnt >= ct->ct_threshold) {

				if ((ct->ct_error_cnt %
				    SMB_THRESHOLD_REPORT_THROTTLE) == 0) {
					cmn_err(CE_NOTE, "%s: server busy: "
					    "threshold %d exceeded",
					    ct->ct_cmd, ct->ct_threshold);
				}

				atomic_inc_32(&ct->ct_error_cnt);
				mutex_exit(&ct->ct_mutex);
				return (rc);
			}

			mutex_exit(&ct->ct_mutex);
		}

		mutex_enter(&ct->ct_mutex);
		atomic_dec_32(&ct->ct_blocked_cnt);
		if (smb_threshold_debug) {
			cmn_err(CE_NOTE, "smb_threshold_enter[%s]: resumed "
			    "(blocked ops: %u, inflight ops: %u)", ct->ct_cmd,
			    ct->ct_blocked_cnt, ct->ct_active_cnt);
		}
	}

	atomic_inc_32(&ct->ct_active_cnt);
	mutex_exit(&ct->ct_mutex);
	return (0);
}

void
smb_threshold_exit(smb_cmd_threshold_t *ct, smb_server_t *sv)
{
	mutex_enter(&ct->ct_mutex);
	atomic_dec_32(&ct->ct_active_cnt);
	mutex_exit(&ct->ct_mutex);
	smb_event_notify(sv, ct->ct_event_id);
}
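/*
 * Illustrative sketch (not part of the original source): bracketing a
 * throttled operation with the threshold calls above.  The error path
 * shown is hypothetical; a real caller maps the errno to a suitable
 * SMB status.
 *
 *	if (smb_threshold_enter(ct) != 0)
 *		return (... server busy / request timed out ...);
 *
 *	... process the request ...
 *
 *	smb_threshold_exit(ct, sv);
 */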