/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/tzfile.h>
#include <sys/atomic.h>
#include <sys/kidmap.h>
#include <sys/time.h>
#include <sys/spl.h>
#include <sys/cpuvar.h>
#include <sys/random.h>
#include <smbsrv/smb_kproto.h>
#include <smbsrv/smb_fsops.h>
#include <smbsrv/smbinfo.h>
#include <smbsrv/smb_xdr.h>
#include <smbsrv/smb_vops.h>
#include <smbsrv/smb_idmap.h>

#include <sys/sid.h>
#include <sys/priv_names.h>

static kmem_cache_t	*smb_dtor_cache;
static boolean_t	smb_llist_initialized = B_FALSE;

static boolean_t smb_thread_continue_timedwait_locked(smb_thread_t *, int);

static boolean_t smb_avl_hold(smb_avl_t *);
static void smb_avl_rele(smb_avl_t *);

time_t tzh_leapcnt = 0;

struct tm
*smb_gmtime_r(time_t *clock, struct tm *result);

time_t
smb_timegm(struct tm *tm);

struct tm {
	int	tm_sec;
	int	tm_min;
	int	tm_hour;
	int	tm_mday;
	int	tm_mon;
	int	tm_year;
	int	tm_wday;
	int	tm_yday;
	int	tm_isdst;
};

static int days_in_month[] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

int
smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (smb_wcequiv_strlen(str));
	return (strlen(str));
}

int
smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (smb_wcequiv_strlen(str) + 2);
	return (strlen(str) + 1);
}

int
smb_ascii_or_unicode_null_len(struct smb_request *sr)
{
	if (sr->smb_flg2 & SMB_FLAGS2_UNICODE)
		return (2);
	return (1);
}

/*
 * Return B_TRUE if pattern contains wildcards
 */
boolean_t
smb_contains_wildcards(const char *pattern)
{
	static const char *wildcards = "*?";

	return (strpbrk(pattern, wildcards) != NULL);
}

/*
 * When converting wildcards a '.' in a name is treated as a base and
 * extension separator even if the name is longer than 8.3.
 *
 * The '*' character matches an entire part of the name. For example,
 * "*.abc" matches any name with an extension of "abc".
 *
 * The '?' character matches a single character.
 * If the base contains all ? (8 or more) then it is treated as *.
 * If the extension contains all ? (3 or more) then it is treated as *.
 *
 * Clients convert ASCII wildcards to Unicode wildcards as follows:
 *
 *	? is converted to >
 *	. is converted to " if it is followed by ? or *
 *	* is converted to < if it is followed by .
 *
 * Note that clients convert "*." to '<' and drop the '.' but "*.txt"
 * is sent as "<.TXT", i.e.
 *
 *	dir *.    -> dir <
 *	dir *.txt -> dir <.TXT
 *
 * Since " and < are illegal in Windows file names, we always convert
 * these Unicode wildcards without checking the following character.
 */
void
smb_convert_wildcards(char *pattern)
{
	static char *match_all[] = {
		"*.",
		"*.*"
	};
	char	*extension;
	char	*p;
	int	len;
	int	i;

	/*
	 * Special case "<" for "dir *.", and fast-track for "*".
	 */
	if ((*pattern == '<') || (*pattern == '*')) {
		if (*(pattern + 1) == '\0') {
			*pattern = '*';
			return;
		}
	}

	for (p = pattern; *p != '\0'; ++p) {
		switch (*p) {
		case '<':
			*p = '*';
			break;
		case '>':
			*p = '?';
			break;
		case '\"':
			*p = '.';
			break;
		default:
			break;
		}
	}

	/*
	 * Replace "????????.ext" with "*.ext".
	 */
	p = pattern;
	p += strspn(p, "?");
	if (*p == '.') {
		*p = '\0';
		len = strlen(pattern);
		*p = '.';
		if (len >= SMB_NAME83_BASELEN) {
			*pattern = '*';
			(void) strlcpy(pattern + 1, p, MAXPATHLEN - 1);
		}
	}

	/*
	 * Replace "base.???" with "base.*".
	 */
	if ((extension = strrchr(pattern, '.')) != NULL) {
		p = ++extension;
		p += strspn(p, "?");
		if (*p == '\0') {
			len = strlen(extension);
			if (len >= SMB_NAME83_EXTLEN) {
				*extension = '\0';
				(void) strlcat(pattern, "*", MAXPATHLEN);
			}
		}
	}

	/*
	 * Replace anything that matches an entry in match_all with "*".
	 */
	for (i = 0; i < sizeof (match_all) / sizeof (match_all[0]); ++i) {
		if (strcmp(pattern, match_all[i]) == 0) {
			(void) strlcpy(pattern, "*", MAXPATHLEN);
			break;
		}
	}
}
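
/*
 * Illustrative conversions (a sketch derived from the logic above,
 * assuming the usual 8.3 limits SMB_NAME83_BASELEN == 8 and
 * SMB_NAME83_EXTLEN == 3):
 *
 *	"<"		becomes	"*"
 *	"<.TXT"		becomes	"*.TXT"
 *	"????????.abc"	becomes	"*.abc"		(8 or more ?'s in the base)
 *	"base.???"	becomes	"base.*"	(3 or more ?'s in the extension)
 *	"*.*"		becomes	"*"		(match_all entry)
 */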

/*
 * smb_sattr_check
 *
 * Check file attributes against a search attribute (sattr) mask.
 *
 * Normal files, which includes READONLY and ARCHIVE, always pass
 * this check.  If the DIRECTORY, HIDDEN or SYSTEM special attributes
 * are set then they must appear in the search mask.  The special
 * attributes are inclusive, i.e. all special attributes that appear
 * in sattr must also appear in the file attributes for the check to
 * pass.
 *
 * The following examples show how this works:
 *
 *	fileA:	READONLY
 *	fileB:	0 (no attributes = normal file)
 *	fileC:	READONLY, ARCHIVE
 *	fileD:	HIDDEN
 *	fileE:	READONLY, HIDDEN, SYSTEM
 *	dirA:	DIRECTORY
 *
 * search attribute: 0
 *	Returns: fileA, fileB and fileC.
 * search attribute: HIDDEN
 *	Returns: fileA, fileB, fileC and fileD.
 * search attribute: SYSTEM
 *	Returns: fileA, fileB and fileC.
 * search attribute: DIRECTORY
 *	Returns: fileA, fileB, fileC and dirA.
 * search attribute: HIDDEN and SYSTEM
 *	Returns: fileA, fileB, fileC, fileD and fileE.
 *
 * Returns true if the file and sattr match; otherwise, returns false.
 */
boolean_t
smb_sattr_check(uint16_t dosattr, uint16_t sattr)
{
	if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
	    !(sattr & FILE_ATTRIBUTE_DIRECTORY))
		return (B_FALSE);

	if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
	    !(sattr & FILE_ATTRIBUTE_HIDDEN))
		return (B_FALSE);

	if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
	    !(sattr & FILE_ATTRIBUTE_SYSTEM))
		return (B_FALSE);

	return (B_TRUE);
}

int
microtime(timestruc_t *tvp)
{
	tvp->tv_sec = gethrestime_sec();
	tvp->tv_nsec = 0;
	return (0);
}

int32_t
clock_get_milli_uptime()
{
	return (TICK_TO_MSEC(ddi_get_lbolt()));
}

/*ARGSUSED*/
int
smb_noop(void *p, size_t size, int foo)
{
	return (0);
}

/*
 * smb_idpool_increment
 *
 * This function increments the ID pool by doubling the current size. This
 * function assumes the caller entered the mutex of the pool.
 */
static int
smb_idpool_increment(smb_idpool_t *pool)
{
	uint8_t		*new_pool;
	uint32_t	new_size;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	new_size = pool->id_size * 2;
	if (new_size <= SMB_IDPOOL_MAX_SIZE) {
		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
		if (new_pool) {
			bzero(new_pool, new_size / 8);
			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
			kmem_free(pool->id_pool, pool->id_size / 8);
			pool->id_pool = new_pool;
			pool->id_free_counter += new_size - pool->id_size;
			pool->id_max_free_counter += new_size - pool->id_size;
			pool->id_size = new_size;
			pool->id_idx_msk = (new_size / 8) - 1;
			if (new_size >= SMB_IDPOOL_MAX_SIZE) {
				/* id -1 made unavailable */
				pool->id_pool[pool->id_idx_msk] = 0x80;
				pool->id_free_counter--;
				pool->id_max_free_counter--;
			}
			return (0);
		}
	}
	return (-1);
}

/*
 * smb_idpool_constructor
 *
 * This function initializes the pool structure provided.
 */
int
smb_idpool_constructor(smb_idpool_t *pool)
{
	ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);

	pool->id_size = SMB_IDPOOL_MIN_SIZE;
	pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
	pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_bit = 0x02;
	pool->id_bit_idx = 1;
	pool->id_idx = 0;
	pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
	    KM_SLEEP);
	bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
	/* -1 id made unavailable */
	pool->id_pool[0] = 0x01;	/* id 0 made unavailable */
	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
	pool->id_magic = SMB_IDPOOL_MAGIC;
	return (0);
}

/*
 * smb_idpool_destructor
 *
 * This function tears down and frees the resources associated with the
 * pool provided.
 */
void
smb_idpool_destructor(smb_idpool_t *pool)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
	pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
	mutex_destroy(&pool->id_mutex);
	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
}
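
/*
 * Typical ID pool lifecycle (an illustrative sketch, not part of the
 * original code; error handling trimmed):
 *
 *	smb_idpool_t	pool;
 *	uint16_t	id;
 *
 *	(void) smb_idpool_constructor(&pool);
 *	if (smb_idpool_alloc(&pool, &id) == 0) {
 *		... use id; 0 and 0xFFFF are never handed out ...
 *		smb_idpool_free(&pool, id);
 *	}
 *	smb_idpool_destructor(&pool);
 */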

/*
 * smb_idpool_alloc
 *
 * This function allocates an ID from the pool provided.
 */
int
smb_idpool_alloc(smb_idpool_t *pool, uint16_t *id)
{
	uint32_t	i;
	uint8_t		bit;
	uint8_t		bit_idx;
	uint8_t		byte;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	mutex_enter(&pool->id_mutex);
	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
		mutex_exit(&pool->id_mutex);
		return (-1);
	}

	i = pool->id_size;
	while (i) {
		bit = pool->id_bit;
		bit_idx = pool->id_bit_idx;
		byte = pool->id_pool[pool->id_idx];
		while (bit) {
			if (byte & bit) {
				bit = bit << 1;
				bit_idx++;
				continue;
			}
			pool->id_pool[pool->id_idx] |= bit;
			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
			pool->id_free_counter--;
			pool->id_bit = bit;
			pool->id_bit_idx = bit_idx;
			mutex_exit(&pool->id_mutex);
			return (0);
		}
		pool->id_bit = 1;
		pool->id_bit_idx = 0;
		pool->id_idx++;
		pool->id_idx &= pool->id_idx_msk;
		--i;
	}
	/*
	 * This section of code shouldn't be reached. If there are IDs
	 * available and none could be found there's a problem.
	 */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
	return (-1);
}

/*
 * smb_idpool_free
 *
 * This function frees the ID provided.
 */
void
smb_idpool_free(smb_idpool_t *pool, uint16_t id)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(id != 0);
	ASSERT(id != 0xFFFF);

	mutex_enter(&pool->id_mutex);
	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
		pool->id_free_counter++;
		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
		mutex_exit(&pool->id_mutex);
		return;
	}
	/* Freeing a free ID. */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
}

/*
 * Initialize the llist delete queue object cache.
 */
void
smb_llist_init(void)
{
	if (smb_llist_initialized)
		return;

	smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
	    sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);

	smb_llist_initialized = B_TRUE;
}

/*
 * Destroy the llist delete queue object cache.
 */
void
smb_llist_fini(void)
{
	if (!smb_llist_initialized)
		return;

	kmem_cache_destroy(smb_dtor_cache);
	smb_llist_initialized = B_FALSE;
}

/*
 * smb_llist_constructor
 *
 * This function initializes a locked list.
 */
void
smb_llist_constructor(smb_llist_t *ll, size_t size, size_t offset)
{
	rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
	list_create(&ll->ll_list, size, offset);
	list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
	    offsetof(smb_dtor_t, dt_lnd));
	ll->ll_count = 0;
	ll->ll_wrop = 0;
	ll->ll_deleteq_count = 0;
	ll->ll_flushing = B_FALSE;
}

/*
 * Flush the delete queue and destroy a locked list.
 */
void
smb_llist_destructor(smb_llist_t *ll)
{
	smb_llist_flush(ll);

	ASSERT(ll->ll_count == 0);
	ASSERT(ll->ll_deleteq_count == 0);

	rw_destroy(&ll->ll_lock);
	list_destroy(&ll->ll_list);
	list_destroy(&ll->ll_deleteq);
	mutex_destroy(&ll->ll_mutex);
}

/*
 * Post an object to the delete queue.  The delete queue will be processed
 * during list exit or list destruction.  Objects are often posted for
 * deletion during list iteration (while the list is locked) but that is
 * not required, and an object can be posted at any time.
 */
void
smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
{
	smb_dtor_t	*dtor;

	ASSERT((object != NULL) && (dtorproc != NULL));

	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
	bzero(dtor, sizeof (smb_dtor_t));
	dtor->dt_magic = SMB_DTOR_MAGIC;
	dtor->dt_object = object;
	dtor->dt_proc = dtorproc;

	mutex_enter(&ll->ll_mutex);
	list_insert_tail(&ll->ll_deleteq, dtor);
	++ll->ll_deleteq_count;
	mutex_exit(&ll->ll_mutex);
}

/*
 * Exit the list lock and process the delete queue.
 */
void
smb_llist_exit(smb_llist_t *ll)
{
	rw_exit(&ll->ll_lock);
	smb_llist_flush(ll);
}

/*
 * Flush the list delete queue.  The mutex is dropped across the destructor
 * call in case this leads to additional objects being posted to the delete
 * queue.
 */
void
smb_llist_flush(smb_llist_t *ll)
{
	smb_dtor_t	*dtor;

	mutex_enter(&ll->ll_mutex);
	if (ll->ll_flushing) {
		mutex_exit(&ll->ll_mutex);
		return;
	}
	ll->ll_flushing = B_TRUE;

	dtor = list_head(&ll->ll_deleteq);
	while (dtor != NULL) {
		SMB_DTOR_VALID(dtor);
		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
		list_remove(&ll->ll_deleteq, dtor);
		--ll->ll_deleteq_count;
		mutex_exit(&ll->ll_mutex);

		dtor->dt_proc(dtor->dt_object);

		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
		kmem_cache_free(smb_dtor_cache, dtor);
		mutex_enter(&ll->ll_mutex);
		dtor = list_head(&ll->ll_deleteq);
	}
	ll->ll_flushing = B_FALSE;

	mutex_exit(&ll->ll_mutex);
}

/*
 * smb_llist_upgrade
 *
 * This function tries to upgrade the lock of the locked list. It assumes the
 * lock has already been entered in RW_READER mode. It first tries using the
 * Solaris function rw_tryupgrade(). If that call fails the lock is released
 * and reentered in RW_WRITER mode. In that last case a window is opened during
 * which the contents of the list may have changed. The return code indicates
 * whether or not the list was modified when the lock was exited.
 */
int
smb_llist_upgrade(smb_llist_t *ll)
{
	uint64_t	wrop;

	if (rw_tryupgrade(&ll->ll_lock) != 0) {
		return (0);
	}
	wrop = ll->ll_wrop;
	rw_exit(&ll->ll_lock);
	rw_enter(&ll->ll_lock, RW_WRITER);
	return (wrop != ll->ll_wrop);
}

/*
 * smb_llist_insert_head
 *
 * This function inserts the object passed at the beginning of the list. This
 * function assumes the lock of the list has already been entered.
 */
void
smb_llist_insert_head(smb_llist_t *ll, void *obj)
{
	list_insert_head(&ll->ll_list, obj);
	++ll->ll_wrop;
	++ll->ll_count;
}

/*
 * smb_llist_insert_tail
 *
 * This function appends the object passed to the list. This function assumes
 * the lock of the list has already been entered.
 */
void
smb_llist_insert_tail(smb_llist_t *ll, void *obj)
{
	list_insert_tail(&ll->ll_list, obj);
	++ll->ll_wrop;
	++ll->ll_count;
}
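
/*
 * Typical locked-list usage (an illustrative sketch, assuming the
 * smb_llist_enter()/smb_llist_head()/smb_llist_next() wrappers from
 * smb_kproto.h; node_dtor and must_delete are hypothetical):
 *
 *	smb_llist_enter(&ll, RW_READER);
 *	for (node = smb_llist_head(&ll); node != NULL;
 *	    node = smb_llist_next(&ll, node)) {
 *		if (must_delete(node))
 *			smb_llist_post(&ll, node, node_dtor);
 *	}
 *	smb_llist_exit(&ll);	... also flushes the delete queue ...
 */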

/*
 * smb_llist_remove
 *
 * This function removes the object passed from the list. This function assumes
 * the lock of the list has already been entered.
 */
void
smb_llist_remove(smb_llist_t *ll, void *obj)
{
	list_remove(&ll->ll_list, obj);
	++ll->ll_wrop;
	--ll->ll_count;
}

/*
 * smb_llist_get_count
 *
 * This function returns the number of elements in the specified list.
 */
uint32_t
smb_llist_get_count(smb_llist_t *ll)
{
	return (ll->ll_count);
}

/*
 * smb_slist_constructor
 *
 * Synchronized list constructor.
 */
void
smb_slist_constructor(smb_slist_t *sl, size_t size, size_t offset)
{
	mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
	list_create(&sl->sl_list, size, offset);
	sl->sl_count = 0;
	sl->sl_waiting = B_FALSE;
}

/*
 * smb_slist_destructor
 *
 * Synchronized list destructor.
 */
void
smb_slist_destructor(smb_slist_t *sl)
{
	VERIFY(sl->sl_count == 0);

	mutex_destroy(&sl->sl_mutex);
	cv_destroy(&sl->sl_cv);
	list_destroy(&sl->sl_list);
}

/*
 * smb_slist_insert_head
 *
 * This function inserts the object passed at the beginning of the list.
 */
void
smb_slist_insert_head(smb_slist_t *sl, void *obj)
{
	mutex_enter(&sl->sl_mutex);
	list_insert_head(&sl->sl_list, obj);
	++sl->sl_count;
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_insert_tail
 *
 * This function appends the object passed to the list.
 */
void
smb_slist_insert_tail(smb_slist_t *sl, void *obj)
{
	mutex_enter(&sl->sl_mutex);
	list_insert_tail(&sl->sl_list, obj);
	++sl->sl_count;
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_remove
 *
 * This function removes the object passed by the caller from the list.
 */
void
smb_slist_remove(smb_slist_t *sl, void *obj)
{
	mutex_enter(&sl->sl_mutex);
	list_remove(&sl->sl_list, obj);
	if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_move_tail
 *
 * This function transfers all the contents of the synchronized list to the
 * list_t provided. It returns the number of objects transferred.
 */
uint32_t
smb_slist_move_tail(list_t *lst, smb_slist_t *sl)
{
	uint32_t	rv;

	mutex_enter(&sl->sl_mutex);
	rv = sl->sl_count;
	if (sl->sl_count) {
		list_move_tail(lst, &sl->sl_list);
		sl->sl_count = 0;
		if (sl->sl_waiting) {
			sl->sl_waiting = B_FALSE;
			cv_broadcast(&sl->sl_cv);
		}
	}
	mutex_exit(&sl->sl_mutex);
	return (rv);
}

/*
 * smb_slist_obj_move
 *
 * This function moves an object from one list to the end of the other list. It
 * assumes the mutex of each list has been entered.
 */
void
smb_slist_obj_move(smb_slist_t *dst, smb_slist_t *src, void *obj)
{
	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);

	list_remove(&src->sl_list, obj);
	list_insert_tail(&dst->sl_list, obj);
	dst->sl_count++;
	src->sl_count--;
	if ((src->sl_count == 0) && (src->sl_waiting)) {
		src->sl_waiting = B_FALSE;
		cv_broadcast(&src->sl_cv);
	}
}
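
/*
 * Typical producer/consumer pattern (an illustrative sketch; 'local',
 * 'n' and the processing step are hypothetical):
 *
 *	smb_slist_insert_tail(&sl, obj);	... producer side ...
 *
 *	list_t local;				... consumer side ...
 *	n = smb_slist_move_tail(&local, &sl);
 *	... process the n objects now on 'local' ...
 *
 *	smb_slist_wait_for_empty(&sl);		... teardown waits here ...
 */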

/*
 * smb_slist_wait_for_empty
 *
 * This function waits for a list to be emptied.
 */
void
smb_slist_wait_for_empty(smb_slist_t *sl)
{
	mutex_enter(&sl->sl_mutex);
	while (sl->sl_count) {
		sl->sl_waiting = B_TRUE;
		cv_wait(&sl->sl_cv, &sl->sl_mutex);
	}
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_slist_exit
 *
 * This function exits the mutex of the list and signals the condition
 * variable if the list is empty.
 */
void
smb_slist_exit(smb_slist_t *sl)
{
	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}

/*
 * smb_thread_entry_point
 *
 * Common entry point for all the threads created through smb_thread_start.
 * The state of the thread is set to "running" at the beginning and moved to
 * "exiting" just before calling thread_exit(). The condition variable is
 * also signaled.
 */
static void
smb_thread_entry_point(smb_thread_t *thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
	mutex_enter(&thread->sth_mtx);
	ASSERT(thread->sth_state == SMB_THREAD_STATE_STARTING);
	thread->sth_th = curthread;
	thread->sth_did = thread->sth_th->t_did;

	if (!thread->sth_kill) {
		thread->sth_state = SMB_THREAD_STATE_RUNNING;
		cv_signal(&thread->sth_cv);
		mutex_exit(&thread->sth_mtx);
		thread->sth_ep(thread, thread->sth_ep_arg);
		mutex_enter(&thread->sth_mtx);
	}
	thread->sth_th = NULL;
	thread->sth_state = SMB_THREAD_STATE_EXITING;
	cv_broadcast(&thread->sth_cv);
	mutex_exit(&thread->sth_mtx);
	thread_exit();
}

/*
 * smb_thread_init
 */
void
smb_thread_init(smb_thread_t *thread, char *name, smb_thread_ep_t ep,
    void *ep_arg)
{
	ASSERT(thread->sth_magic != SMB_THREAD_MAGIC);

	bzero(thread, sizeof (*thread));

	(void) strlcpy(thread->sth_name, name, sizeof (thread->sth_name));
	thread->sth_ep = ep;
	thread->sth_ep_arg = ep_arg;
	thread->sth_state = SMB_THREAD_STATE_EXITED;
	mutex_init(&thread->sth_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&thread->sth_cv, NULL, CV_DEFAULT, NULL);
	thread->sth_magic = SMB_THREAD_MAGIC;
}

/*
 * smb_thread_destroy
 */
void
smb_thread_destroy(smb_thread_t *thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);
	ASSERT(thread->sth_state == SMB_THREAD_STATE_EXITED);
	thread->sth_magic = 0;
	mutex_destroy(&thread->sth_mtx);
	cv_destroy(&thread->sth_cv);
}
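
/*
 * Typical worker thread pattern (an illustrative sketch; my_worker and
 * my_arg are hypothetical).  The entry point loops on
 * smb_thread_continue(), which blocks until the thread is signaled or
 * told to exit:
 *
 *	static void
 *	my_worker(smb_thread_t *thread, void *my_arg)
 *	{
 *		while (smb_thread_continue(thread)) {
 *			... perform one unit of work ...
 *		}
 *	}
 *
 *	smb_thread_init(&thread, "my_worker", my_worker, my_arg);
 *	(void) smb_thread_start(&thread);
 *	...
 *	smb_thread_stop(&thread);
 *	smb_thread_destroy(&thread);
 */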

/*
 * smb_thread_start
 *
 * This function starts a thread with the parameters provided. It waits until
 * the state of the thread has been moved to running.
 */
/*ARGSUSED*/
int
smb_thread_start(smb_thread_t *thread)
{
	int		rc = 0;
	kthread_t	*tmpthread;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	switch (thread->sth_state) {
	case SMB_THREAD_STATE_EXITED:
		thread->sth_state = SMB_THREAD_STATE_STARTING;
		mutex_exit(&thread->sth_mtx);
		tmpthread = thread_create(NULL, 0, smb_thread_entry_point,
		    thread, 0, &p0, TS_RUN, minclsyspri);
		ASSERT(tmpthread != NULL);
		mutex_enter(&thread->sth_mtx);
		while (thread->sth_state == SMB_THREAD_STATE_STARTING)
			cv_wait(&thread->sth_cv, &thread->sth_mtx);
		if (thread->sth_state != SMB_THREAD_STATE_RUNNING)
			rc = -1;
		break;
	default:
		ASSERT(0);
		rc = -1;
		break;
	}
	mutex_exit(&thread->sth_mtx);
	return (rc);
}

/*
 * smb_thread_stop
 *
 * This function signals a thread to kill itself and waits until the "exiting"
 * state has been reached.
 */
void
smb_thread_stop(smb_thread_t *thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	switch (thread->sth_state) {
	case SMB_THREAD_STATE_RUNNING:
	case SMB_THREAD_STATE_STARTING:
		if (!thread->sth_kill) {
			thread->sth_kill = B_TRUE;
			cv_broadcast(&thread->sth_cv);
			while (thread->sth_state != SMB_THREAD_STATE_EXITING)
				cv_wait(&thread->sth_cv, &thread->sth_mtx);
			mutex_exit(&thread->sth_mtx);
			thread_join(thread->sth_did);
			mutex_enter(&thread->sth_mtx);
			thread->sth_state = SMB_THREAD_STATE_EXITED;
			thread->sth_did = 0;
			thread->sth_kill = B_FALSE;
			cv_broadcast(&thread->sth_cv);
			break;
		}
		/*FALLTHRU*/

	case SMB_THREAD_STATE_EXITING:
		if (thread->sth_kill) {
			while (thread->sth_state != SMB_THREAD_STATE_EXITED)
				cv_wait(&thread->sth_cv, &thread->sth_mtx);
		} else {
			thread->sth_state = SMB_THREAD_STATE_EXITED;
			thread->sth_did = 0;
		}
		break;

	case SMB_THREAD_STATE_EXITED:
		break;

	default:
		ASSERT(0);
		break;
	}
	mutex_exit(&thread->sth_mtx);
}

/*
 * smb_thread_signal
 *
 * This function signals a thread.
 */
void
smb_thread_signal(smb_thread_t *thread)
{
	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	switch (thread->sth_state) {
	case SMB_THREAD_STATE_RUNNING:
		cv_signal(&thread->sth_cv);
		break;

	default:
		break;
	}
	mutex_exit(&thread->sth_mtx);
}

boolean_t
smb_thread_continue(smb_thread_t *thread)
{
	boolean_t result;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	result = smb_thread_continue_timedwait_locked(thread, 0);
	mutex_exit(&thread->sth_mtx);

	return (result);
}

boolean_t
smb_thread_continue_nowait(smb_thread_t *thread)
{
	boolean_t result;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	/*
	 * Setting ticks=-1 requests a non-blocking check.  We will
	 * still block if the thread is in "suspend" state.
	 */
	result = smb_thread_continue_timedwait_locked(thread, -1);
	mutex_exit(&thread->sth_mtx);

	return (result);
}

boolean_t
smb_thread_continue_timedwait(smb_thread_t *thread, int seconds)
{
	boolean_t result;

	ASSERT(thread->sth_magic == SMB_THREAD_MAGIC);

	mutex_enter(&thread->sth_mtx);
	result = smb_thread_continue_timedwait_locked(thread,
	    SEC_TO_TICK(seconds));
	mutex_exit(&thread->sth_mtx);

	return (result);
}

/*
 * smb_thread_continue_timedwait_locked
 *
 * Internal only.  Ticks==-1 means don't block, Ticks == 0 means wait
 * indefinitely
 */
static boolean_t
smb_thread_continue_timedwait_locked(smb_thread_t *thread, int ticks)
{
	boolean_t result;

	/* -1 means don't block */
	if (ticks != -1 && !thread->sth_kill) {
		if (ticks == 0) {
			cv_wait(&thread->sth_cv, &thread->sth_mtx);
		} else {
			(void) cv_reltimedwait(&thread->sth_cv,
			    &thread->sth_mtx, (clock_t)ticks, TR_CLOCK_TICK);
		}
	}
	result = (thread->sth_kill == 0);

	return (result);
}

/*
 * smb_rwx_init
 */
void
smb_rwx_init(smb_rwx_t *rwx)
{
	bzero(rwx, sizeof (smb_rwx_t));
	cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
}

/*
 * smb_rwx_destroy
 */
void
smb_rwx_destroy(smb_rwx_t *rwx)
{
	mutex_destroy(&rwx->rwx_mutex);
	cv_destroy(&rwx->rwx_cv);
	rw_destroy(&rwx->rwx_lock);
}

/*
 * smb_rwx_rwexit
 */
void
smb_rwx_rwexit(smb_rwx_t *rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mutex_enter(&rwx->rwx_mutex);
		if (rwx->rwx_waiting) {
			rwx->rwx_waiting = B_FALSE;
			cv_broadcast(&rwx->rwx_cv);
		}
		mutex_exit(&rwx->rwx_mutex);
	}
	rw_exit(&rwx->rwx_lock);
}

/*
 * smb_rwx_rwupgrade
 */
krw_t
smb_rwx_rwupgrade(smb_rwx_t *rwx)
{
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		return (RW_WRITER);
	}
	if (!rw_tryupgrade(&rwx->rwx_lock)) {
		rw_exit(&rwx->rwx_lock);
		rw_enter(&rwx->rwx_lock, RW_WRITER);
	}
	return (RW_READER);
}

/*
 * smb_rwx_rwdowngrade
 */
void
smb_rwx_rwdowngrade(smb_rwx_t *rwx, krw_t mode)
{
	ASSERT(rw_write_held(&rwx->rwx_lock));
	ASSERT(rw_owner(&rwx->rwx_lock) == curthread);

	if (mode == RW_WRITER) {
		return;
	}
	ASSERT(mode == RW_READER);
	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		rwx->rwx_waiting = B_FALSE;
		cv_broadcast(&rwx->rwx_cv);
	}
	mutex_exit(&rwx->rwx_mutex);
	rw_downgrade(&rwx->rwx_lock);
}
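
/*
 * Typical upgrade/downgrade pairing (an illustrative sketch, assuming
 * the smb_rwx_rwenter() wrapper from smb_kproto.h): a reader that
 * finds it must modify state upgrades the lock, then restores the
 * mode it originally held.
 *
 *	smb_rwx_rwenter(&rwx, RW_READER);
 *	...
 *	mode = smb_rwx_rwupgrade(&rwx);
 *	... modify state under the write lock ...
 *	smb_rwx_rwdowngrade(&rwx, mode);
 *	...
 *	smb_rwx_rwexit(&rwx);
 */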

/*
 * smb_rwx_rwwait
 *
 * This function assumes the smb_rwx lock was entered in RW_READER or
 * RW_WRITER mode. It will:
 *
 *	1) release the lock and save its current mode.
 *	2) wait until the condition variable is signaled. This can happen for
 *	   2 reasons: When a writer releases the lock or when the time out (if
 *	   provided) expires.
 *	3) re-acquire the lock in the mode saved in (1).
 */
int
smb_rwx_rwwait(smb_rwx_t *rwx, clock_t timeout)
{
	int	rc = 1;	/* assume "signaled" if the wait is already satisfied */
	krw_t	mode;

	mutex_enter(&rwx->rwx_mutex);
	rwx->rwx_waiting = B_TRUE;
	mutex_exit(&rwx->rwx_mutex);

	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mode = RW_WRITER;
	} else {
		ASSERT(rw_read_held(&rwx->rwx_lock));
		mode = RW_READER;
	}
	rw_exit(&rwx->rwx_lock);

	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		if (timeout == -1) {
			rc = 1;
			cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
		} else {
			rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
			    timeout, TR_CLOCK_TICK);
		}
	}
	mutex_exit(&rwx->rwx_mutex);

	rw_enter(&rwx->rwx_lock, mode);
	return (rc);
}

/*
 * SMB ID mapping
 *
 * Solaris ID mapping service (aka Winchester) works with domain SIDs
 * and RIDs where domain SIDs are in string format. CIFS service works
 * with binary SIDs understandable by CIFS clients. A layer of SMB ID
 * mapping functions is implemented to hide the SID conversion details
 * and also hide the handling of arrays of batch mapping requests.
 *
 * IMPORTANT NOTE: The Winchester API requires a zone. Because the CIFS
 * server currently only runs in the global zone the global zone is
 * specified. This needs to be fixed when the CIFS server supports zones.
 */

static int smb_idmap_batch_binsid(smb_idmap_batch_t *sib);

/*
 * smb_idmap_getid
 *
 * Maps the given Windows SID to a Solaris ID using the
 * simple mapping API.
 */
idmap_stat
smb_idmap_getid(smb_sid_t *sid, uid_t *id, int *idtype)
{
	smb_idmap_t	sim;
	char		sidstr[SMB_SID_STRSZ];

	smb_sid_tostr(sid, sidstr);
	if (smb_sid_splitstr(sidstr, &sim.sim_rid) != 0)
		return (IDMAP_ERR_SID);
	sim.sim_domsid = sidstr;
	sim.sim_id = id;
	/* Preserve the requested type unless the lookup refines it. */
	sim.sim_idtype = *idtype;

	switch (*idtype) {
	case SMB_IDMAP_USER:
		sim.sim_stat = kidmap_getuidbysid(global_zone, sim.sim_domsid,
		    sim.sim_rid, sim.sim_id);
		break;

	case SMB_IDMAP_GROUP:
		sim.sim_stat = kidmap_getgidbysid(global_zone, sim.sim_domsid,
		    sim.sim_rid, sim.sim_id);
		break;

	case SMB_IDMAP_UNKNOWN:
		sim.sim_stat = kidmap_getpidbysid(global_zone, sim.sim_domsid,
		    sim.sim_rid, sim.sim_id, &sim.sim_idtype);
		break;

	default:
		ASSERT(0);
		return (IDMAP_ERR_ARG);
	}

	*idtype = sim.sim_idtype;

	return (sim.sim_stat);
}
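
/*
 * Typical simple-mapping call (an illustrative sketch): map a SID to
 * a Solaris ID when the ID type is not known up front.
 *
 *	int	idtype = SMB_IDMAP_UNKNOWN;
 *	uid_t	id;
 *
 *	if (smb_idmap_getid(sid, &id, &idtype) == IDMAP_SUCCESS) {
 *		... id holds the UID/GID, idtype the mapped type ...
 *	}
 */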

/*
 * smb_idmap_getsid
 *
 * Maps the given Solaris ID to a Windows SID using the
 * simple mapping API.
 */
idmap_stat
smb_idmap_getsid(uid_t id, int idtype, smb_sid_t **sid)
{
	smb_idmap_t	sim;

	switch (idtype) {
	case SMB_IDMAP_USER:
		sim.sim_stat = kidmap_getsidbyuid(global_zone, id,
		    (const char **)&sim.sim_domsid, &sim.sim_rid);
		break;

	case SMB_IDMAP_GROUP:
		sim.sim_stat = kidmap_getsidbygid(global_zone, id,
		    (const char **)&sim.sim_domsid, &sim.sim_rid);
		break;

	case SMB_IDMAP_EVERYONE:
		/* Everyone S-1-1-0 */
		sim.sim_domsid = "S-1-1";
		sim.sim_rid = 0;
		sim.sim_stat = IDMAP_SUCCESS;
		break;

	default:
		ASSERT(0);
		return (IDMAP_ERR_ARG);
	}

	if (sim.sim_stat != IDMAP_SUCCESS)
		return (sim.sim_stat);

	if (sim.sim_domsid == NULL)
		return (IDMAP_ERR_NOMAPPING);

	sim.sim_sid = smb_sid_fromstr(sim.sim_domsid);
	if (sim.sim_sid == NULL)
		return (IDMAP_ERR_INTERNAL);

	*sid = smb_sid_splice(sim.sim_sid, sim.sim_rid);
	smb_sid_free(sim.sim_sid);
	if (*sid == NULL)
		sim.sim_stat = IDMAP_ERR_INTERNAL;

	return (sim.sim_stat);
}

/*
 * smb_idmap_batch_create
 *
 * Creates and initializes the context for batch ID mapping.
 */
idmap_stat
smb_idmap_batch_create(smb_idmap_batch_t *sib, uint16_t nmap, int flags)
{
	ASSERT(sib);

	bzero(sib, sizeof (smb_idmap_batch_t));

	sib->sib_idmaph = kidmap_get_create(global_zone);

	sib->sib_flags = flags;
	sib->sib_nmap = nmap;
	sib->sib_size = nmap * sizeof (smb_idmap_t);
	sib->sib_maps = kmem_zalloc(sib->sib_size, KM_SLEEP);

	return (IDMAP_SUCCESS);
}

/*
 * smb_idmap_batch_destroy
 *
 * Frees the batch ID mapping context.
 * If ID mapping is Solaris -> Windows it frees the memory
 * allocated for binary SIDs.
 */
void
smb_idmap_batch_destroy(smb_idmap_batch_t *sib)
{
	char	*domsid;
	int	i;

	ASSERT(sib);
	ASSERT(sib->sib_maps);

	if (sib->sib_idmaph)
		kidmap_get_destroy(sib->sib_idmaph);

	if (sib->sib_flags & SMB_IDMAP_ID2SID) {
		/*
		 * SIDs are allocated only when mapping
		 * UID/GID to SIDs
		 */
		for (i = 0; i < sib->sib_nmap; i++)
			smb_sid_free(sib->sib_maps[i].sim_sid);
	} else if (sib->sib_flags & SMB_IDMAP_SID2ID) {
		/*
		 * SID prefixes are allocated only when mapping
		 * SIDs to UID/GID
		 */
		for (i = 0; i < sib->sib_nmap; i++) {
			domsid = sib->sib_maps[i].sim_domsid;
			if (domsid)
				smb_mem_free(domsid);
		}
	}

	if (sib->sib_size && sib->sib_maps)
		kmem_free(sib->sib_maps, sib->sib_size);
}

/*
 * smb_idmap_batch_getid
 *
 * Queue a request to map the given SID to a UID or GID.
 *
 * sim->sim_id should point to the variable that's supposed to
 * hold the returned UID/GID. This needs to be set up by the caller
 * of this function.
 *
 * If the requested ID type is known, it's passed as 'idtype';
 * if it's unknown it'll be returned in sim->sim_idtype.
 */
idmap_stat
smb_idmap_batch_getid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
    smb_sid_t *sid, int idtype)
{
	char		strsid[SMB_SID_STRSZ];
	idmap_stat	idm_stat;

	ASSERT(idmaph);
	ASSERT(sim);
	ASSERT(sid);

	smb_sid_tostr(sid, strsid);
	if (smb_sid_splitstr(strsid, &sim->sim_rid) != 0)
		return (IDMAP_ERR_SID);
	sim->sim_domsid = smb_mem_strdup(strsid);

	switch (idtype) {
	case SMB_IDMAP_USER:
		idm_stat = kidmap_batch_getuidbysid(idmaph, sim->sim_domsid,
		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
		break;

	case SMB_IDMAP_GROUP:
		idm_stat = kidmap_batch_getgidbysid(idmaph, sim->sim_domsid,
		    sim->sim_rid, sim->sim_id, &sim->sim_stat);
		break;

	case SMB_IDMAP_UNKNOWN:
		idm_stat = kidmap_batch_getpidbysid(idmaph, sim->sim_domsid,
		    sim->sim_rid, sim->sim_id, &sim->sim_idtype,
		    &sim->sim_stat);
		break;

	default:
		ASSERT(0);
		return (IDMAP_ERR_ARG);
	}

	return (idm_stat);
}

/*
 * smb_idmap_batch_getsid
 *
 * Queue a request to map the given UID/GID to a SID.
 *
 * sim->sim_domsid and sim->sim_rid will contain the mapping
 * result upon successful processing of the batched request.
 */
idmap_stat
smb_idmap_batch_getsid(idmap_get_handle_t *idmaph, smb_idmap_t *sim,
    uid_t id, int idtype)
{
	idmap_stat	idm_stat;

	switch (idtype) {
	case SMB_IDMAP_USER:
		idm_stat = kidmap_batch_getsidbyuid(idmaph, id,
		    (const char **)&sim->sim_domsid, &sim->sim_rid,
		    &sim->sim_stat);
		break;

	case SMB_IDMAP_GROUP:
		idm_stat = kidmap_batch_getsidbygid(idmaph, id,
		    (const char **)&sim->sim_domsid, &sim->sim_rid,
		    &sim->sim_stat);
		break;

	case SMB_IDMAP_OWNERAT:
		/* Current Owner S-1-5-32-766 */
		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
		sim->sim_rid = SECURITY_CURRENT_OWNER_RID;
		sim->sim_stat = IDMAP_SUCCESS;
		idm_stat = IDMAP_SUCCESS;
		break;

	case SMB_IDMAP_GROUPAT:
		/* Current Group S-1-5-32-767 */
		sim->sim_domsid = NT_BUILTIN_DOMAIN_SIDSTR;
		sim->sim_rid = SECURITY_CURRENT_GROUP_RID;
		sim->sim_stat = IDMAP_SUCCESS;
		idm_stat = IDMAP_SUCCESS;
		break;

	case SMB_IDMAP_EVERYONE:
		/* Everyone S-1-1-0 */
		sim->sim_domsid = NT_WORLD_AUTH_SIDSTR;
		sim->sim_rid = 0;
		sim->sim_stat = IDMAP_SUCCESS;
		idm_stat = IDMAP_SUCCESS;
		break;

	default:
		ASSERT(0);
		return (IDMAP_ERR_ARG);
	}

	return (idm_stat);
}

/*
 * smb_idmap_batch_binsid
 *
 * Convert the (domsid, rid) pairs to binary SIDs.
 *
 * Returns 0 if successful and non-zero upon failure.
 */
static int
smb_idmap_batch_binsid(smb_idmap_batch_t *sib)
{
	smb_sid_t	*sid;
	smb_idmap_t	*sim;
	int		i;

	if (sib->sib_flags & SMB_IDMAP_SID2ID)
		/* This operation is not required */
		return (0);

	sim = sib->sib_maps;
	for (i = 0; i < sib->sib_nmap; sim++, i++) {
		ASSERT(sim->sim_domsid);
		if (sim->sim_domsid == NULL)
			return (1);

		if ((sid = smb_sid_fromstr(sim->sim_domsid)) == NULL)
			return (1);

		sim->sim_sid = smb_sid_splice(sid, sim->sim_rid);
		smb_sid_free(sid);
	}

	return (0);
}

/*
 * smb_idmap_batch_getmappings
 *
 * Trigger the ID mapping service to get the mappings for the queued
 * requests.
 *
 * Checks the result of all the queued requests.
 * If this is a Solaris -> Windows mapping it generates
 * binary SIDs from the returned (domsid, rid) pairs.
 */
idmap_stat
smb_idmap_batch_getmappings(smb_idmap_batch_t *sib)
{
	idmap_stat	idm_stat = IDMAP_SUCCESS;
	int		i;

	idm_stat = kidmap_get_mappings(sib->sib_idmaph);
	if (idm_stat != IDMAP_SUCCESS)
		return (idm_stat);

	/*
	 * Check the status for all the queued requests
	 */
	for (i = 0; i < sib->sib_nmap; i++) {
		if (sib->sib_maps[i].sim_stat != IDMAP_SUCCESS)
			return (sib->sib_maps[i].sim_stat);
	}

	if (smb_idmap_batch_binsid(sib) != 0)
		idm_stat = IDMAP_ERR_OTHER;

	return (idm_stat);
}
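
/*
 * Typical batch lifecycle (an illustrative sketch; 'n' and 'ids' are
 * hypothetical): queue one request per mapping, fire the batch once
 * with smb_idmap_batch_getmappings(), then free the context.
 *
 *	smb_idmap_batch_t sib;
 *
 *	(void) smb_idmap_batch_create(&sib, n, SMB_IDMAP_ID2SID);
 *	for (i = 0; i < n; i++)
 *		(void) smb_idmap_batch_getsid(sib.sib_idmaph,
 *		    &sib.sib_maps[i], ids[i], SMB_IDMAP_USER);
 *	if (smb_idmap_batch_getmappings(&sib) == IDMAP_SUCCESS)
 *		... sib.sib_maps[i].sim_sid holds each binary SID ...
 *	smb_idmap_batch_destroy(&sib);
 */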

uint64_t
smb_time_unix_to_nt(timestruc_t *unix_time)
{
	uint64_t nt_time;

	if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
		return (0);

	nt_time = unix_time->tv_sec;
	nt_time *= 10000000;  /* seconds to 100ns */
	nt_time += unix_time->tv_nsec / 100;
	return (nt_time + NT_TIME_BIAS);
}

void
smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
{
	uint32_t seconds;

	ASSERT(unix_time);

	if ((nt_time == 0) || (nt_time == -1)) {
		unix_time->tv_sec = 0;
		unix_time->tv_nsec = 0;
		return;
	}

	nt_time -= NT_TIME_BIAS;
	seconds = nt_time / 10000000;
	unix_time->tv_sec = seconds;
	unix_time->tv_nsec = (nt_time % 10000000) * 100;
}

/*
 * smb_time_gmt_to_local, smb_time_local_to_gmt
 *
 * Apply the gmt offset to convert between local time and gmt
 */
int32_t
smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt)
{
	if ((gmt == 0) || (gmt == -1))
		return (0);

	return (gmt - sr->sr_gmtoff);
}

int32_t
smb_time_local_to_gmt(smb_request_t *sr, int32_t local)
{
	if ((local == 0) || (local == -1))
		return (0);

	return (local + sr->sr_gmtoff);
}
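
/*
 * Worked example (a sketch of the smb_time_unix_to_nt() conversion
 * above): NT time counts 100ns intervals since Jan 1 1601, so
 * NT_TIME_BIAS is the 1601..1970 offset expressed in 100ns units.
 * A unix time of tv_sec = 1, tv_nsec = 500 therefore maps to
 *
 *	1 * 10000000 + 500 / 100 + NT_TIME_BIAS = 10000005 + NT_TIME_BIAS
 */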

/*
 * smb_time_dos_to_unix
 *
 * Convert SMB_DATE & SMB_TIME values to a unix timestamp.
 *
 * A date/time field of 0 means that the server file system
 * assigned value need not be changed. The behaviour when the
 * date/time field is set to -1 is not documented but is
 * generally treated like 0.
 * If date or time is 0 or -1 the unix time is returned as 0
 * so that the caller can identify and handle this special case.
 */
int32_t
smb_time_dos_to_unix(int16_t date, int16_t time)
{
	struct tm	atm;

	if (((date == 0) || (time == 0)) ||
	    ((date == -1) || (time == -1))) {
		return (0);
	}

	atm.tm_year = ((date >>  9) & 0x3F) + 80;
	atm.tm_mon  = ((date >>  5) & 0x0F) - 1;
	atm.tm_mday = ((date >>  0) & 0x1F);
	atm.tm_hour = ((time >> 11) & 0x1F);
	atm.tm_min  = ((time >>  5) & 0x3F);
	atm.tm_sec  = ((time >>  0) & 0x1F) << 1;

	return (smb_timegm(&atm));
}

void
smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p)
{
	struct tm	atm;
	int		i;
	time_t		tmp_time;

	if (ux_time == 0) {
		*date_p = 0;
		*time_p = 0;
		return;
	}

	tmp_time = (time_t)ux_time;
	(void) smb_gmtime_r(&tmp_time, &atm);

	if (date_p) {
		i = 0;
		i += atm.tm_year - 80;
		i <<= 4;
		i += atm.tm_mon + 1;
		i <<= 5;
		i += atm.tm_mday;

		*date_p = (short)i;
	}
	if (time_p) {
		i = 0;
		i += atm.tm_hour;
		i <<= 6;
		i += atm.tm_min;
		i <<= 5;
		i += atm.tm_sec >> 1;

		*time_p = (short)i;
	}
}
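
/*
 * Worked example (a sketch of the encoding used above): the DOS date
 * packs year-1980, month and day; the time packs hours, minutes and
 * seconds/2. For 1 Jan 1995 12:30:10,
 *
 *	date = ((1995 - 1980) << 9) | (1 << 5) | 1
 *	time = (12 << 11) | (30 << 5) | (10 >> 1)
 */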

/*
 * smb_gmtime_r
 *
 * Thread-safe version of smb_gmtime. Returns a null pointer if either
 * input parameter is a null pointer. Otherwise returns a pointer
 * to result.
 *
 * Day of the week calculation: the Epoch was a Thursday.
 *
 * There are no timezone corrections so tm_isdst and tm_gmtoff are
 * always zero, and the zone is always WET.
 */
struct tm *
smb_gmtime_r(time_t *clock, struct tm *result)
{
	time_t tsec;
	int year;
	int month;
	int sec_per_month;

	if (clock == 0 || result == 0)
		return (0);

	bzero(result, sizeof (struct tm));
	tsec = *clock;
	tsec -= tzh_leapcnt;

	result->tm_wday = tsec / SECSPERDAY;
	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;

	year = EPOCH_YEAR;
	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
	    (SECSPERDAY * DAYSPERNYEAR))) {
		if (isleap(year))
			tsec -= SECSPERDAY * DAYSPERLYEAR;
		else
			tsec -= SECSPERDAY * DAYSPERNYEAR;

		++year;
	}

	result->tm_year = year - TM_YEAR_BASE;
	result->tm_yday = tsec / SECSPERDAY;

	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
		sec_per_month = days_in_month[month] * SECSPERDAY;

		if (month == TM_FEBRUARY && isleap(year))
			sec_per_month += SECSPERDAY;

		if (tsec < sec_per_month)
			break;

		tsec -= sec_per_month;
	}

	result->tm_mon = month;
	result->tm_mday = (tsec / SECSPERDAY) + 1;
	tsec %= SECSPERDAY;
	result->tm_sec = tsec % 60;
	tsec /= 60;
	result->tm_min = tsec % 60;
	tsec /= 60;
	result->tm_hour = (int)tsec;

	return (result);
}

/*
 * smb_timegm
 *
 * Converts the broken-down time in tm to a time value, i.e. the number
 * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
 * not a POSIX or ANSI function. Per the man page, the input values of
 * tm_wday and tm_yday are ignored and, as the input data is assumed to
 * represent GMT, we force tm_isdst and tm_gmtoff to 0.
 *
 * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
 * and tm_yday, and bring the other fields within normal range. I don't
 * think this is really how it should be done but it's convenient for
 * now.
 */
time_t
smb_timegm(struct tm *tm)
{
	time_t tsec;
	int dd;
	int mm;
	int yy;
	int year;

	if (tm == 0)
		return (-1);

	year = tm->tm_year + TM_YEAR_BASE;
	tsec = tzh_leapcnt;

	for (yy = EPOCH_YEAR; yy < year; ++yy) {
		if (isleap(yy))
			tsec += SECSPERDAY * DAYSPERLYEAR;
		else
			tsec += SECSPERDAY * DAYSPERNYEAR;
	}

	for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
		dd = days_in_month[mm] * SECSPERDAY;

		if (mm == TM_FEBRUARY && isleap(year))
			dd += SECSPERDAY;

		tsec += dd;
	}

	tsec += (tm->tm_mday - 1) * SECSPERDAY;
	tsec += tm->tm_sec;
	tsec += tm->tm_min * SECSPERMIN;
	tsec += tm->tm_hour * SECSPERHOUR;

	tm->tm_isdst = 0;
	(void) smb_gmtime_r(&tsec, tm);
	return (tsec);
}

/*
 * smb_pad_align
 *
 * Returns the number of bytes required to pad an offset to the
 * specified alignment.
 */
uint32_t
smb_pad_align(uint32_t offset, uint32_t align)
{
	uint32_t pad = offset % align;

	if (pad != 0)
		pad = align - pad;

	return (pad);
}
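
/*
 * Example (a sketch): smb_pad_align(6, 4) returns 2, since the next
 * 4-byte boundary after offset 6 is 8; smb_pad_align(8, 4) returns 0.
 */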

/*
 * smb_panic
 *
 * Logs the file name, function name and line number passed in and panics the
 * system.
 */
void
smb_panic(char *file, const char *func, int line)
{
	cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
}

/*
 * Creates an AVL tree and initializes the given smb_avl_t
 * structure using the passed args
 */
void
smb_avl_create(smb_avl_t *avl, size_t size, size_t offset, smb_avl_nops_t *ops)
{
	ASSERT(avl);
	ASSERT(ops);

	rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL);

	avl->avl_nops = ops;
	avl->avl_state = SMB_AVL_STATE_READY;
	avl->avl_refcnt = 0;
	(void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence,
	    sizeof (uint32_t));

	avl_create(&avl->avl_tree, ops->avln_cmp, size, offset);
}

/*
 * Destroys the specified AVL tree.
 * It waits for all the in-flight operations to finish
 * before destroying the AVL.
 */
void
smb_avl_destroy(smb_avl_t *avl)
{
	void *cookie = NULL;
	void *node;

	ASSERT(avl);

	mutex_enter(&avl->avl_mutex);
	if (avl->avl_state != SMB_AVL_STATE_READY) {
		mutex_exit(&avl->avl_mutex);
		return;
	}

	avl->avl_state = SMB_AVL_STATE_DESTROYING;

	while (avl->avl_refcnt > 0)
		(void) cv_wait(&avl->avl_cv, &avl->avl_mutex);
	mutex_exit(&avl->avl_mutex);

	rw_enter(&avl->avl_lock, RW_WRITER);
	while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL)
		avl->avl_nops->avln_destroy(node);

	avl_destroy(&avl->avl_tree);
	rw_exit(&avl->avl_lock);

	rw_destroy(&avl->avl_lock);

	mutex_destroy(&avl->avl_mutex);
	bzero(avl, sizeof (smb_avl_t));
}

/*
 * Adds the given item to the AVL if it's
 * not already there.
 *
 * Returns:
 *
 *	ENOTACTIVE	AVL is not in READY state
 *	EEXIST		The item is already in AVL
 */
int
smb_avl_add(smb_avl_t *avl, void *item)
{
	avl_index_t where;

	ASSERT(avl);
	ASSERT(item);

	if (!smb_avl_hold(avl))
		return (ENOTACTIVE);

	rw_enter(&avl->avl_lock, RW_WRITER);
	if (avl_find(&avl->avl_tree, item, &where) != NULL) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return (EEXIST);
	}

	avl_insert(&avl->avl_tree, item, where);
	avl->avl_sequence++;
	rw_exit(&avl->avl_lock);

	smb_avl_rele(avl);
	return (0);
}

/*
 * Removes the given item from the AVL.
 * If no reference is left on the item
 * it will also be destroyed by calling the
 * registered destroy operation.
 */
void
smb_avl_remove(smb_avl_t *avl, void *item)
{
	avl_index_t where;
	void *rm_item;

	ASSERT(avl);
	ASSERT(item);

	if (!smb_avl_hold(avl))
		return;

	rw_enter(&avl->avl_lock, RW_WRITER);
	if ((rm_item = avl_find(&avl->avl_tree, item, &where)) == NULL) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return;
	}

	avl_remove(&avl->avl_tree, rm_item);
	if (avl->avl_nops->avln_rele(rm_item))
		avl->avl_nops->avln_destroy(rm_item);
	avl->avl_sequence++;
	rw_exit(&avl->avl_lock);

	smb_avl_rele(avl);
}

/*
 * Looks up the AVL for the given item.
 * If the item is found a hold on the object
 * is taken before the pointer to it is
 * returned to the caller. The caller MUST
 * always call smb_avl_release() after it's done
 * using the returned object to release the hold
 * taken on the object.
 */
void *
smb_avl_lookup(smb_avl_t *avl, void *item)
{
	void *node = NULL;

	ASSERT(avl);
	ASSERT(item);

	if (!smb_avl_hold(avl))
		return (NULL);

	rw_enter(&avl->avl_lock, RW_READER);
	node = avl_find(&avl->avl_tree, item, NULL);
	if (node != NULL)
		avl->avl_nops->avln_hold(node);
	rw_exit(&avl->avl_lock);

	if (node == NULL)
		smb_avl_rele(avl);

	return (node);
}

/*
 * The hold on the given object is released.
 * This function MUST always be called after
 * smb_avl_lookup() and smb_avl_iterate() for
 * the returned object.
 *
 * If AVL is in DESTROYING state, the destroying
 * thread will be notified.
 */
void
smb_avl_release(smb_avl_t *avl, void *item)
{
	ASSERT(avl);
	ASSERT(item);

	if (avl->avl_nops->avln_rele(item))
		avl->avl_nops->avln_destroy(item);

	smb_avl_rele(avl);
}

/*
 * Initializes the given cursor for the AVL.
 * The cursor will be used to iterate through the AVL
 */
void
smb_avl_iterinit(smb_avl_t *avl, smb_avl_cursor_t *cursor)
{
	ASSERT(avl);
	ASSERT(cursor);

	cursor->avlc_next = NULL;
	cursor->avlc_sequence = avl->avl_sequence;
}

/*
 * Iterates through the AVL using the given cursor.
 * The first call after smb_avl_iterinit() returns the first object;
 * each subsequent call returns a pointer to the next object.
 *
 * If a new object is added to or removed from the AVL
 * between two calls to this function, the iteration
 * will terminate prematurely.
 *
 * The caller MUST always call smb_avl_release() after it's
 * done using the returned object to release the hold taken
 * on the object.
 */
void *
smb_avl_iterate(smb_avl_t *avl, smb_avl_cursor_t *cursor)
{
	void *node;

	ASSERT(avl);
	ASSERT(cursor);

	if (!smb_avl_hold(avl))
		return (NULL);

	rw_enter(&avl->avl_lock, RW_READER);
	if (cursor->avlc_sequence != avl->avl_sequence) {
		rw_exit(&avl->avl_lock);
		smb_avl_rele(avl);
		return (NULL);
	}

	if (cursor->avlc_next == NULL)
		node = avl_first(&avl->avl_tree);
	else
		node = AVL_NEXT(&avl->avl_tree, cursor->avlc_next);

	if (node != NULL)
		avl->avl_nops->avln_hold(node);

	cursor->avlc_next = node;
	rw_exit(&avl->avl_lock);

	if (node == NULL)
		smb_avl_rele(avl);

	return (node);
}
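
/*
 * Typical iteration pattern (an illustrative sketch):
 *
 *	smb_avl_cursor_t cursor;
 *	void *node;
 *
 *	smb_avl_iterinit(&avl, &cursor);
 *	while ((node = smb_avl_iterate(&avl, &cursor)) != NULL) {
 *		... use node ...
 *		smb_avl_release(&avl, node);
 *	}
 */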

/*
 * Increments the AVL reference count in order to
 * prevent the avl from being destroyed while it's
 * being accessed.
 */
static boolean_t
smb_avl_hold(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	if (avl->avl_state != SMB_AVL_STATE_READY) {
		mutex_exit(&avl->avl_mutex);
		return (B_FALSE);
	}
	avl->avl_refcnt++;
	mutex_exit(&avl->avl_mutex);

	return (B_TRUE);
}

/*
 * Decrements the AVL reference count to release the
 * hold. If another thread is trying to destroy the
 * AVL and is waiting for the reference count to become
 * 0, it is signaled to wake up.
 */
static void
smb_avl_rele(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	ASSERT(avl->avl_refcnt > 0);
	avl->avl_refcnt--;
	if (avl->avl_state == SMB_AVL_STATE_DESTROYING)
		cv_broadcast(&avl->avl_cv);
	mutex_exit(&avl->avl_mutex);
}

/*
 * smb_latency_init
 */
void
smb_latency_init(smb_latency_t *lat)
{
	bzero(lat, sizeof (*lat));
	mutex_init(&lat->ly_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
}

/*
 * smb_latency_destroy
 */
void
smb_latency_destroy(smb_latency_t *lat)
{
	mutex_destroy(&lat->ly_mutex);
}

/*
 * smb_latency_add_sample
 *
 * Uses the new sample to calculate the new mean and standard deviation. The
 * sample must be a scaled value.
 */
void
smb_latency_add_sample(smb_latency_t *lat, hrtime_t sample)
{
	hrtime_t	a_mean;
	hrtime_t	d_mean;

	mutex_enter(&lat->ly_mutex);
	lat->ly_a_nreq++;
	lat->ly_a_sum += sample;
	if (lat->ly_a_nreq != 0) {
		a_mean = lat->ly_a_sum / lat->ly_a_nreq;
		lat->ly_a_stddev =
		    (sample - a_mean) * (sample - lat->ly_a_mean);
		lat->ly_a_mean = a_mean;
	}
	lat->ly_d_nreq++;
	lat->ly_d_sum += sample;
	if (lat->ly_d_nreq != 0) {
		d_mean = lat->ly_d_sum / lat->ly_d_nreq;
		lat->ly_d_stddev =
		    (sample - d_mean) * (sample - lat->ly_d_mean);
		lat->ly_d_mean = d_mean;
	}
	mutex_exit(&lat->ly_mutex);
}

/*
 * smb_srqueue_init
 */
void
smb_srqueue_init(smb_srqueue_t *srq)
{
	bzero(srq, sizeof (*srq));
	mutex_init(&srq->srq_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
	srq->srq_wlastupdate = srq->srq_rlastupdate = gethrtime_unscaled();
}

/*
 * smb_srqueue_destroy
 */
void
smb_srqueue_destroy(smb_srqueue_t *srq)
{
	mutex_destroy(&srq->srq_mutex);
}

/*
 * smb_srqueue_waitq_enter
 */
void
smb_srqueue_waitq_enter(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	wcnt = srq->srq_wcnt++;
	if (wcnt != 0) {
		srq->srq_wlentime += delta * wcnt;
		srq->srq_wtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}

/*
 * smb_srqueue_runq_exit
 */
void
smb_srqueue_runq_exit(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	rcnt = srq->srq_rcnt--;
	ASSERT(rcnt > 0);
	srq->srq_rlentime += delta * rcnt;
	srq->srq_rtime += delta;
	mutex_exit(&srq->srq_mutex);
}

/*
 * smb_srqueue_waitq_to_runq
 */
void
smb_srqueue_waitq_to_runq(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	wcnt = srq->srq_wcnt--;
	ASSERT(wcnt > 0);
	srq->srq_wlentime += delta * wcnt;
	srq->srq_wtime += delta;
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	rcnt = srq->srq_rcnt++;
	if (rcnt != 0) {
		srq->srq_rlentime += delta * rcnt;
		srq->srq_rtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}
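
/*
 * Request lifecycle (an illustrative sketch): a request enters the
 * wait queue on arrival, moves to the run queue when a worker picks
 * it up, and leaves the run queue on completion.
 *
 *	smb_srqueue_waitq_enter(srq);		... request queued ...
 *	smb_srqueue_waitq_to_runq(srq);		... worker starts ...
 *	smb_srqueue_runq_exit(srq);		... request completes ...
 */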

/*
 * smb_srqueue_update
 *
 * Takes a snapshot of the smb_sr_stat_t structure passed in.
 */
void
smb_srqueue_update(smb_srqueue_t *srq, smb_kstat_utilization_t *kd)
{
	hrtime_t	delta;
	hrtime_t	snaptime;

	mutex_enter(&srq->srq_mutex);
	snaptime = gethrtime_unscaled();
	delta = snaptime - srq->srq_wlastupdate;
	srq->srq_wlastupdate = snaptime;
	if (srq->srq_wcnt != 0) {
		srq->srq_wlentime += delta * srq->srq_wcnt;
		srq->srq_wtime += delta;
	}
	delta = snaptime - srq->srq_rlastupdate;
	srq->srq_rlastupdate = snaptime;
	if (srq->srq_rcnt != 0) {
		srq->srq_rlentime += delta * srq->srq_rcnt;
		srq->srq_rtime += delta;
	}
	kd->ku_rlentime = srq->srq_rlentime;
	kd->ku_rtime = srq->srq_rtime;
	kd->ku_wlentime = srq->srq_wlentime;
	kd->ku_wtime = srq->srq_wtime;
	mutex_exit(&srq->srq_mutex);
	scalehrtime(&kd->ku_rlentime);
	scalehrtime(&kd->ku_rtime);
	scalehrtime(&kd->ku_wlentime);
	scalehrtime(&kd->ku_wtime);
}

void
smb_threshold_init(smb_cmd_threshold_t *ct, char *cmd, int threshold,
    int timeout)
{
	bzero(ct, sizeof (smb_cmd_threshold_t));
	mutex_init(&ct->ct_mutex, NULL, MUTEX_DEFAULT, NULL);
	ct->ct_cmd = cmd;
	ct->ct_threshold = threshold;
	ct->ct_event = smb_event_create(timeout);
	ct->ct_event_id = smb_event_txid(ct->ct_event);

	if (smb_threshold_debug) {
		cmn_err(CE_NOTE, "smb_threshold_init[%s]: threshold (%d), "
		    "timeout (%d)", cmd, threshold, timeout);
	}
}

/*
 * This function must be called prior to SMB_SERVER_STATE_STOPPING state
 * so that ct_event can be successfully removed from the event list.
 * It should not be called when the server mutex is held or when the
 * server is removed from the server list.
 */
void
smb_threshold_fini(smb_cmd_threshold_t *ct)
{
	smb_event_destroy(ct->ct_event);
	mutex_destroy(&ct->ct_mutex);
	bzero(ct, sizeof (smb_cmd_threshold_t));
}

/*
 * This threshold mechanism can be used to limit the number of simultaneous
 * requests, which serves to limit the stress that can be applied to the
 * service and also allows the service to respond to requests before the
 * client times out and reports that the server is not responding.
 *
 * If the number of requests exceeds the threshold, new requests will be
 * stalled until the number drops back to the threshold. Stalled requests
 * will be notified as appropriate, in which case 0 will be returned.
 * If the timeout expires before the request is notified, a non-zero errno
 * value will be returned.
 *
 * To avoid a flood of messages, the message rate is throttled as well.
 */
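
/*
 * Typical usage (an illustrative sketch; sv is the owning
 * smb_server_t):
 *
 *	if (smb_threshold_enter(&ct) != 0)
 *		return (error);		... server busy ...
 *	... process the request ...
 *	smb_threshold_exit(&ct, sv);
 */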

int
smb_threshold_enter(smb_cmd_threshold_t *ct)
{
	int	rc;

	mutex_enter(&ct->ct_mutex);
	if (ct->ct_active_cnt >= ct->ct_threshold && ct->ct_event != NULL) {
		atomic_inc_32(&ct->ct_blocked_cnt);

		if (smb_threshold_debug) {
			cmn_err(CE_NOTE, "smb_threshold_enter[%s]: blocked "
			    "(blocked ops: %u, inflight ops: %u)",
			    ct->ct_cmd, ct->ct_blocked_cnt, ct->ct_active_cnt);
		}

		mutex_exit(&ct->ct_mutex);

		if ((rc = smb_event_wait(ct->ct_event)) != 0) {
			if (rc == ECANCELED)
				return (rc);

			mutex_enter(&ct->ct_mutex);
			if (ct->ct_active_cnt >= ct->ct_threshold) {

				if ((ct->ct_error_cnt %
				    SMB_THRESHOLD_REPORT_THROTTLE) == 0) {
					cmn_err(CE_NOTE, "%s: server busy: "
					    "threshold %d exceeded",
					    ct->ct_cmd, ct->ct_threshold);
				}

				atomic_inc_32(&ct->ct_error_cnt);
				mutex_exit(&ct->ct_mutex);
				return (rc);
			}

			mutex_exit(&ct->ct_mutex);

		}

		mutex_enter(&ct->ct_mutex);
		atomic_dec_32(&ct->ct_blocked_cnt);
		if (smb_threshold_debug) {
			cmn_err(CE_NOTE, "smb_threshold_enter[%s]: resumed "
			    "(blocked ops: %u, inflight ops: %u)", ct->ct_cmd,
			    ct->ct_blocked_cnt, ct->ct_active_cnt);
		}
	}

	atomic_inc_32(&ct->ct_active_cnt);
	mutex_exit(&ct->ct_mutex);
	return (0);
}

void
smb_threshold_exit(smb_cmd_threshold_t *ct, smb_server_t *sv)
{
	mutex_enter(&ct->ct_mutex);
	atomic_dec_32(&ct->ct_active_cnt);
	mutex_exit(&ct->ct_mutex);
	smb_event_notify(sv, ct->ct_event_id);
}