/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident   "@(#)smb_lock.c 1.10    08/08/04 SMI"

/*
 * This module provides range lock functionality for CIFS/SMB clients.
 * The lock range service functions process SMB lock and unlock requests
 * for a file by applying the lock rules; the file range is marked as
 * locked if the lock succeeds, otherwise the appropriate error code is
 * returned.
 */

#include <smbsrv/smb_incl.h>
#include <smbsrv/smb_fsops.h>
#include <sys/nbmlock.h>
#include <sys/param.h>

extern caller_context_t smb_ct;

static void smb_lock_posix_unlock(smb_node_t *, smb_lock_t *, cred_t *);
static boolean_t smb_is_range_unlocked(uint64_t, uint64_t, uint32_t,
    smb_llist_t *, uint64_t *);
static int smb_lock_range_overlap(smb_lock_t *, uint64_t, uint64_t);
static uint32_t smb_lock_range_lckrules(smb_request_t *, smb_ofile_t *,
    smb_node_t *, smb_lock_t *, smb_lock_t **);
static clock_t smb_lock_wait(smb_request_t *, smb_lock_t *, smb_lock_t *);
static uint32_t smb_lock_range_ulckrules(smb_request_t *, smb_node_t *,
    uint64_t, uint64_t, smb_lock_t **nodelock);
static smb_lock_t *smb_lock_create(smb_request_t *, uint64_t, uint64_t,
    uint32_t, uint32_t);
static void smb_lock_destroy(smb_lock_t *);
static void smb_lock_free(smb_lock_t *);

/*
 * smb_unlock_range
 *
 * Locates the lock range corresponding to the unlock request and removes it.
 *
 * NT_STATUS_SUCCESS - Unlock performed successfully.
 * !NT_STATUS_SUCCESS - Error in unlock range operation.
 */
uint32_t
smb_unlock_range(
    smb_request_t       *sr,
    smb_node_t          *node,
    uint64_t            start,
    uint64_t            length)
{
        smb_lock_t      *lock = NULL;
        uint32_t        status;

        /* Apply unlocking rules */
        smb_llist_enter(&node->n_lock_list, RW_WRITER);
        status = smb_lock_range_ulckrules(sr, node, start, length, &lock);
        if (status != NT_STATUS_SUCCESS) {
                /*
                 * If no matching lock range is found in the list,
                 * return the error.
                 */
                ASSERT(lock == NULL);
                smb_llist_exit(&node->n_lock_list);
                return (status);
        }

        smb_llist_remove(&node->n_lock_list, lock);
        smb_lock_posix_unlock(node, lock, sr->user_cr);
        smb_llist_exit(&node->n_lock_list);
        smb_lock_destroy(lock);

        return (status);
}

/*
 * smb_lock_range
 *
 * Checks the integrity of a file lock operation for the given range of file
 * data.  This is performed by applying the lock rules against every element
 * of the node lock list.
 *
 * The function returns with the new lock added if the lock request does not
 * conflict with the existing range locks on the file.  Otherwise, if a
 * non-zero timeout was given, the request blocks until the conflicting lock
 * is released or the timeout expires.
 *
 * NT_STATUS_SUCCESS - Lock range performed successfully.
 * !NT_STATUS_SUCCESS - Error in lock range operation.
 */
uint32_t
smb_lock_range(
    smb_request_t       *sr,
    uint64_t            start,
    uint64_t            length,
    uint32_t            timeout,
    uint32_t            locktype)
{
        smb_ofile_t     *file = sr->fid_ofile;
        smb_node_t      *node = file->f_node;
        smb_lock_t      *lock;
        smb_lock_t      *clock = NULL;
        uint32_t        result = NT_STATUS_SUCCESS;
        boolean_t       lock_has_timeout = (timeout != 0);

        lock = smb_lock_create(sr, start, length, locktype, timeout);

        smb_llist_enter(&node->n_lock_list, RW_WRITER);
        for (;;) {
                clock_t rc;

                /* Apply locking rules */
                result = smb_lock_range_lckrules(sr, file, node, lock, &clock);

                if ((result == NT_STATUS_CANCELLED) ||
                    (result == NT_STATUS_SUCCESS) ||
                    (result == NT_STATUS_RANGE_NOT_LOCKED)) {
                        ASSERT(clock == NULL);
                        break;
                } else if (timeout == 0) {
                        break;
                }

                ASSERT(result == NT_STATUS_LOCK_NOT_GRANTED);
                ASSERT(clock);
                /*
                 * Call smb_lock_wait holding the write lock for the
                 * node lock list.  smb_lock_wait will release
                 * this lock if it blocks.
                 */
                ASSERT(node == clock->l_file->f_node);

                rc = smb_lock_wait(sr, lock, clock);
                if (rc == 0) {
                        result = NT_STATUS_CANCELLED;
                        break;
                }
                if (rc == -1)
                        timeout = 0;

                clock = NULL;
        }

        lock->l_blocked_by = NULL;

        if (result != NT_STATUS_SUCCESS) {
                /*
                 * Under certain conditions NT_STATUS_FILE_LOCK_CONFLICT
                 * should be returned instead of NT_STATUS_LOCK_NOT_GRANTED.
                 */
                if (result == NT_STATUS_LOCK_NOT_GRANTED) {
                        /*
                         * Locks with timeouts always return
                         * NT_STATUS_FILE_LOCK_CONFLICT.
                         */
                        if (lock_has_timeout)
                                result = NT_STATUS_FILE_LOCK_CONFLICT;

                        /*
                         * Locks starting higher than 0xef000000 that do not
                         * have the MSB set always return
                         * NT_STATUS_FILE_LOCK_CONFLICT.
                         */
                        if ((lock->l_start >= 0xef000000) &&
                            !(lock->l_start & (1ULL << 63))) {
                                result = NT_STATUS_FILE_LOCK_CONFLICT;
                        }

                        /*
                         * If the last lock attempt to fail on this file handle
                         * started at the same offset as this one, then return
                         * NT_STATUS_FILE_LOCK_CONFLICT.
                         */
                        mutex_enter(&file->f_mutex);
                        if ((file->f_flags & SMB_OFLAGS_LLF_POS_VALID) &&
                            (lock->l_start == file->f_llf_pos)) {
                                result = NT_STATUS_FILE_LOCK_CONFLICT;
                        }
                        mutex_exit(&file->f_mutex);
                }

                /* Update last lock failed offset */
                mutex_enter(&file->f_mutex);
                file->f_llf_pos = lock->l_start;
                file->f_flags |= SMB_OFLAGS_LLF_POS_VALID;
                mutex_exit(&file->f_mutex);

                smb_lock_free(lock);
        } else {
                /*
                 * Don't insert into the CIFS lock list unless the
                 * POSIX lock worked.
                 */
                if (smb_fsop_frlock(node, lock, B_FALSE, sr->user_cr))
                        result = NT_STATUS_FILE_LOCK_CONFLICT;
                else
                        smb_llist_insert_tail(&node->n_lock_list, lock);
        }
        smb_llist_exit(&node->n_lock_list);

        return (result);
}

/*
 * smb_lock_range_access
 *
 * Scans the node lock list to check for any overlapping lock.  An
 * overlapping lock is allowed only under the same session and client pid.
 *
 * Return values
 *      NT_STATUS_SUCCESS               lock access granted.
 *      NT_STATUS_FILE_LOCK_CONFLICT    access denied due to lock conflict.
 */
int
smb_lock_range_access(
    smb_request_t       *sr,
    smb_node_t          *node,
    uint64_t            start,
    uint64_t            length,
    boolean_t           will_write)
{
        smb_lock_t      *lock;
        smb_llist_t     *llist;
        int             status = NT_STATUS_SUCCESS;

        llist = &node->n_lock_list;
        smb_llist_enter(llist, RW_READER);
        /* Search for any applicable lock */
        for (lock = smb_llist_head(llist);
            lock != NULL;
            lock = smb_llist_next(llist, lock)) {

                if (!smb_lock_range_overlap(lock, start, length))
                        /* Lock does not overlap */
                        continue;

                if (lock->l_type == SMB_LOCK_TYPE_READONLY && !will_write)
                        continue;

                if (lock->l_type == SMB_LOCK_TYPE_READWRITE &&
                    lock->l_session_kid == sr->session->s_kid &&
                    lock->l_pid == sr->smb_pid)
                        continue;

                status = NT_STATUS_FILE_LOCK_CONFLICT;
                break;
        }
        smb_llist_exit(llist);
        return (status);
}

void
smb_node_destroy_lock_by_ofile(smb_node_t *node, smb_ofile_t *file)
{
        smb_lock_t      *lock;
        smb_lock_t      *nxtl;
        list_t          destroy_list;

        ASSERT(node);
        ASSERT(node->n_magic == SMB_NODE_MAGIC);
        ASSERT(node->n_state == SMB_NODE_STATE_AVAILABLE);
        ASSERT(node->n_refcnt);

        /*
         * Move locks matching the specified file from the node->n_lock_list
         * to a temporary list (holding the lock the entire time), then
         * destroy all the matching locks.  We can't call smb_lock_destroy
         * while we are holding the lock for node->n_lock_list because we will
         * deadlock, and we can't drop the lock because the list contents might
         * change (for example nxtl might get removed on another thread).
         */
        list_create(&destroy_list, sizeof (smb_lock_t),
            offsetof(smb_lock_t, l_lnd));

        smb_llist_enter(&node->n_lock_list, RW_WRITER);
        lock = smb_llist_head(&node->n_lock_list);
        while (lock) {
                nxtl = smb_llist_next(&node->n_lock_list, lock);
                if (lock->l_file == file) {
                        smb_llist_remove(&node->n_lock_list, lock);
                        smb_lock_posix_unlock(node, lock, file->f_user->u_cred);
                        list_insert_tail(&destroy_list, lock);
                }
                lock = nxtl;
        }
        smb_llist_exit(&node->n_lock_list);

        lock = list_head(&destroy_list);
        while (lock) {
                nxtl = list_next(&destroy_list, lock);
                list_remove(&destroy_list, lock);
                smb_lock_destroy(lock);
                lock = nxtl;
        }

        list_destroy(&destroy_list);
}

void
smb_lock_range_error(smb_request_t *sr, uint32_t status32)
{
        uint16_t errcode;

        if (status32 == NT_STATUS_CANCELLED)
                errcode = ERROR_OPERATION_ABORTED;
        else
                errcode = ERRlock;

        smbsr_error(sr, status32, ERRDOS, errcode);
}

/*
 * smb_range_check()
 *
 * Perform range checking.  First check for internal CIFS range conflicts
 * and then check for external conflicts, for example, with NFS or local
 * access.
 *
 * If nbmand is enabled, this function must be called from within an nbmand
 * critical region.
 */
DWORD
smb_range_check(smb_request_t *sr, smb_node_t *node, uint64_t start,
    uint64_t length, boolean_t will_write)
{
        smb_error_t smberr;
        int svmand;
        int nbl_op;
        int rc;

        ASSERT(node);
        ASSERT(node->n_magic == SMB_NODE_MAGIC);
        ASSERT(node->n_state == SMB_NODE_STATE_AVAILABLE);

        ASSERT(smb_node_in_crit(node));

        if (node->attr.sa_vattr.va_type == VDIR)
                return (NT_STATUS_SUCCESS);

        rc = smb_lock_range_access(sr, node, start, length, will_write);
        if (rc)
                return (NT_STATUS_FILE_LOCK_CONFLICT);

        if ((rc = nbl_svmand(node->vp, kcred, &svmand)) != 0) {
                smbsr_map_errno(rc, &smberr);
                return (smberr.status);
        }

        nbl_op = (will_write) ? NBL_WRITE : NBL_READ;

        if (nbl_lock_conflict(node->vp, nbl_op, start, length, svmand, &smb_ct))
                return (NT_STATUS_FILE_LOCK_CONFLICT);

        return (NT_STATUS_SUCCESS);
}

/*
 * smb_lock_posix_unlock
 *
 * Checks whether the current unlock request falls within other locks and
 * repeatedly calls smb_is_range_unlocked on a sliding basis to unlock all
 * portions of the range that are not covered by other locks.
 */
static void
smb_lock_posix_unlock(smb_node_t *node, smb_lock_t *lock, cred_t *cr)
{
        uint64_t        new_mark;
        uint64_t        unlock_start;
        uint64_t        unlock_end;
        smb_lock_t      new_unlock;
        smb_llist_t     *llist;
        boolean_t       can_unlock;

        new_mark = 0;
        unlock_start = lock->l_start;
        unlock_end = unlock_start + lock->l_length;
        llist = &node->n_lock_list;

        for (;;) {
                can_unlock = smb_is_range_unlocked(unlock_start, unlock_end,
                    lock->l_file->f_uniqid, llist, &new_mark);
                if (can_unlock) {
                        if (new_mark) {
                                new_unlock = *lock;
                                new_unlock.l_start = unlock_start;
                                new_unlock.l_length = new_mark - unlock_start;
                                (void) smb_fsop_frlock(node, &new_unlock,
                                    B_TRUE, cr);
                                unlock_start = new_mark;
                        } else {
                                new_unlock = *lock;
                                new_unlock.l_start = unlock_start;
                                new_unlock.l_length = unlock_end - unlock_start;
                                (void) smb_fsop_frlock(node, &new_unlock,
                                    B_TRUE, cr);
                                break;
                        }
                } else if (new_mark) {
                        unlock_start = new_mark;
                } else {
                        break;
                }
        }
}

/*
 * smb_lock_range_overlap
 *
 * Checks if the lock range (start, length) overlaps the range in the lock
 * structure.
 *
 * Zero-length byte range locks actually affect no single byte of the stream,
 * meaning they can still be accessed even with such locks in place.  However,
 * they do conflict with other ranges in the following manner:
 * a conflict only exists if the positive-length range contains the
 * zero-length range's offset but doesn't start at it.  For example, a
 * zero-length lock at offset 5 conflicts with a lock covering bytes 3-7,
 * but not with one that starts at offset 5.
 *
 * return values:
 *      0 - Lock range doesn't overlap
 *      1 - Lock range overlaps.
 */

#define RANGE_NO_OVERLAP        0
#define RANGE_OVERLAP           1

static int
smb_lock_range_overlap(struct smb_lock *lock, uint64_t start, uint64_t length)
{
        if (length == 0) {
                if ((lock->l_start < start) &&
                    ((lock->l_start + lock->l_length) > start))
                        return (RANGE_OVERLAP);

                return (RANGE_NO_OVERLAP);
        }

        /* The following test is intended to catch roll over locks. */
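        /*
         * (A "roll over" lock is presumably one whose start + length wraps
         * past the end of the 64-bit offset space; for such a lock the
         * interval arithmetic below can miss the overlap, so an identical
         * start/length pair is reported as overlapping up front.)
         */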
        if ((start == lock->l_start) && (length == lock->l_length))
                return (RANGE_OVERLAP);

        if (start < lock->l_start) {
                if (start + length > lock->l_start)
                        return (RANGE_OVERLAP);
        } else if (start < lock->l_start + lock->l_length)
                return (RANGE_OVERLAP);

        return (RANGE_NO_OVERLAP);
}

/*
 * smb_lock_range_lckrules
 *
 * Lock range rules:
 *      1. Overlapping read locks are allowed if the
 *         current locks in the region are only read locks,
 *         irrespective of the pid of the smb client issuing the lock request.
 *
 *      2. A read lock in the overlapped region of a write lock
 *         is allowed if the previous lock was performed by the
 *         same pid and connection.
 *
 * return status:
 *      NT_STATUS_SUCCESS - Input lock range conforms to the lock rules.
 *      NT_STATUS_LOCK_NOT_GRANTED - Input lock conflicts with the lock rules.
 *      NT_STATUS_CANCELLED - Error in processing lock rules.
 */
static uint32_t
smb_lock_range_lckrules(
    smb_request_t       *sr,
    smb_ofile_t         *file,
    smb_node_t          *node,
    smb_lock_t          *dlock,
    smb_lock_t          **clockp)
{
        smb_lock_t      *lock;
        uint32_t        status = NT_STATUS_SUCCESS;

        /* Check if the file is closed */
        if (!smb_ofile_is_open(file)) {
                return (NT_STATUS_RANGE_NOT_LOCKED);
        }

        /* Caller must hold lock for node->n_lock_list */
        for (lock = smb_llist_head(&node->n_lock_list);
            lock != NULL;
            lock = smb_llist_next(&node->n_lock_list, lock)) {

                if (!smb_lock_range_overlap(lock, dlock->l_start,
                    dlock->l_length))
                        continue;

                /*
                 * Check whether the lock in the overlapping record
                 * is only a read lock.  The current finding is that read
                 * locks can overlap irrespective of pids.
                 */
                if ((lock->l_type == SMB_LOCK_TYPE_READONLY) &&
                    (dlock->l_type == SMB_LOCK_TYPE_READONLY)) {
                        continue;
                }

                /*
                 * When a read lock overlaps a write lock, check if
                 * it is allowed.
                 */
                if ((dlock->l_type == SMB_LOCK_TYPE_READONLY) &&
                    !(lock->l_type == SMB_LOCK_TYPE_READONLY)) {
                        if (lock->l_file == sr->fid_ofile &&
                            lock->l_session_kid == sr->session->s_kid &&
                            lock->l_pid == sr->smb_pid &&
                            lock->l_uid == sr->smb_uid) {
                                continue;
                        }
                }

                /* Conflict in overlapping lock element */
                *clockp = lock;
                status = NT_STATUS_LOCK_NOT_GRANTED;
                break;
        }

        return (status);
}

/*
 * smb_lock_wait
 *
 * Wait operation for an smb overlapping lock to be released.  Caller must hold
 * the write lock for node->n_lock_list so that the set of active locks can't
 * change unexpectedly.  The lock for node->n_lock_list will be released
 * within this function during the sleep, after the lock dependency has
 * been recorded.
 *
 * return value
 *
 *      0       The request was canceled.
 *      -1      The timeout was reached.
 *      >0      Condition met.
 */
static clock_t
smb_lock_wait(smb_request_t *sr, smb_lock_t *b_lock, smb_lock_t *c_lock)
{
        clock_t rc;

        ASSERT(sr->sr_awaiting == NULL);

        mutex_enter(&sr->sr_mutex);

        switch (sr->sr_state) {
        case SMB_REQ_STATE_ACTIVE:
                /*
                 * Wait until the timeout expires, keeping track of the actual
                 * time waited for a possible retry failure.
                 */
                sr->sr_state = SMB_REQ_STATE_WAITING_LOCK;
                sr->sr_awaiting = c_lock;
                mutex_exit(&sr->sr_mutex);

                mutex_enter(&c_lock->l_mutex);
                /*
                 * The conflict list (l_conflict_list) for a lock contains
                 * all the locks that are blocked by and in conflict with
                 * that lock.  Add the new lock to the conflict list for the
                 * active lock.
                 *
                 * l_conflict_list is currently a fancy way of representing
                 * the references/dependencies on a lock.  It could be
                 * replaced with a reference count, but this approach
                 * has the advantage that MDB can display the lock
                 * dependencies at any point in time.  In the future
                 * we should be able to leverage the list to implement
                 * an asynchronous locking model.
                 *
                 * l_blocked_by is the reverse of the conflict list.  It
                 * points to the lock that the new lock conflicts with.
                 * As currently implemented this value is purely for
                 * debug purposes -- there are windows of time when
                 * l_blocked_by may be non-NULL even though there is no
                 * conflict list.
                 */
                b_lock->l_blocked_by = c_lock;
                smb_slist_insert_tail(&c_lock->l_conflict_list, b_lock);
                smb_llist_exit(&c_lock->l_file->f_node->n_lock_list);

                /*
                 * XXX Hack.. drop s_lock to avoid blocking subsequent SMBs
                 * that might affect the state of this lock (i.e.
                 * smb_com_close).  We shouldn't sleep while holding
                 * locks anyway.
                 */
                smb_rwx_rwexit(&sr->session->s_lock);

                if (SMB_LOCK_INDEFINITE_WAIT(b_lock)) {
                        cv_wait(&c_lock->l_cv, &c_lock->l_mutex);
                } else {
                        rc = cv_timedwait(&c_lock->l_cv,
                            &c_lock->l_mutex, b_lock->l_end_time);
                }

                /*
                 * XXX Hack continued from above... re-acquire s_lock.
                 * OK to hardcode RW_READER since this is just a hack and
                 * we really should yank it out and do something else.
                 */
                smb_rwx_rwenter(&sr->session->s_lock, RW_READER);

                mutex_exit(&c_lock->l_mutex);

                smb_llist_enter(&c_lock->l_file->f_node->n_lock_list,
                    RW_WRITER);
                smb_slist_remove(&c_lock->l_conflict_list, b_lock);

                mutex_enter(&sr->sr_mutex);
                sr->sr_awaiting = NULL;
                if (sr->sr_state == SMB_REQ_STATE_CANCELED) {
                        rc = 0;
                } else {
                        sr->sr_state = SMB_REQ_STATE_ACTIVE;
                }
                break;

        default:
                ASSERT(sr->sr_state == SMB_REQ_STATE_CANCELED);
                rc = 0;
                break;
        }
        mutex_exit(&sr->sr_mutex);

        return (rc);
}

/*
 * smb_lock_range_ulckrules
 *
 *      1. Unlock should be performed at exactly matching ends.
 *         This has been changed because overlapping ends are
 *         allowed and there is no other precise way of locating
 *         the lock entity in the node lock list.
 *
 *      2. Unlock fails if no corresponding lock exists.
 *
 * Return values
 *
 *      NT_STATUS_SUCCESS               Unlock request matches the lock record
 *                                      pointed to by the 'nodelock' lock
 *                                      structure.
 *
 *      NT_STATUS_RANGE_NOT_LOCKED      Unlock request doesn't match any
 *                                      lock record in the node lock list, or
 *                                      an error occurred in unlock range
 *                                      processing.
 */
static uint32_t
smb_lock_range_ulckrules(
    smb_request_t       *sr,
    smb_node_t          *node,
    uint64_t            start,
    uint64_t            length,
    smb_lock_t          **nodelock)
{
        smb_lock_t      *lock;
        uint32_t        status = NT_STATUS_RANGE_NOT_LOCKED;

        /* Caller must hold lock for node->n_lock_list */
        for (lock = smb_llist_head(&node->n_lock_list);
            lock != NULL;
            lock = smb_llist_next(&node->n_lock_list, lock)) {

                if ((start == lock->l_start) &&
                    (length == lock->l_length) &&
                    lock->l_file == sr->fid_ofile &&
                    lock->l_session_kid == sr->session->s_kid &&
                    lock->l_pid == sr->smb_pid &&
                    lock->l_uid == sr->smb_uid) {
                        *nodelock = lock;
                        status = NT_STATUS_SUCCESS;
                        break;
                }
        }

        return (status);
}

static smb_lock_t *
smb_lock_create(
    smb_request_t       *sr,
    uint64_t            start,
    uint64_t            length,
    uint32_t            locktype,
    uint32_t            timeout)
{
        smb_lock_t      *lock;

        ASSERT(locktype == SMB_LOCK_TYPE_READWRITE ||
            locktype == SMB_LOCK_TYPE_READONLY);

        lock = kmem_zalloc(sizeof (smb_lock_t), KM_SLEEP);
        lock->l_magic = SMB_LOCK_MAGIC;
        lock->l_sr = sr; /* Invalid after lock is active */
        lock->l_session_kid = sr->session->s_kid;
        lock->l_session = sr->session;
        lock->l_file = sr->fid_ofile;
        lock->l_uid = sr->smb_uid;
        lock->l_pid = sr->smb_pid;
        lock->l_type = locktype;
        lock->l_start = start;
        lock->l_length = length;
        /*
         * Calculate the absolute end time so that we can use it
         * in cv_timedwait.
         */
        lock->l_end_time = lbolt + MSEC_TO_TICK(timeout);
        if (timeout == UINT_MAX)
                lock->l_flags |= SMB_LOCK_FLAG_INDEFINITE;

        mutex_init(&lock->l_mutex, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&lock->l_cv, NULL, CV_DEFAULT, NULL);
        smb_slist_constructor(&lock->l_conflict_list, sizeof (smb_lock_t),
            offsetof(smb_lock_t, l_conflict_lnd));

        return (lock);
}

static void
smb_lock_free(smb_lock_t *lock)
{
        smb_slist_destructor(&lock->l_conflict_list);
        cv_destroy(&lock->l_cv);
        mutex_destroy(&lock->l_mutex);

        kmem_free(lock, sizeof (smb_lock_t));
}

/*
 * smb_lock_destroy
 *
 * Caller must hold the node->n_lock_list lock.
 */
static void
smb_lock_destroy(smb_lock_t *lock)
{
        mutex_enter(&lock->l_mutex);
        cv_broadcast(&lock->l_cv);
        mutex_exit(&lock->l_mutex);

        /*
         * The cv_broadcast above should wake up any locks that previously
         * had conflicts with this lock.  Wait for the locking threads
         * to remove their references to this lock.
         */
        smb_slist_wait_for_empty(&lock->l_conflict_list);

        smb_lock_free(lock);
}

/*
 * smb_is_range_unlocked
 *
 * Checks if the current unlock byte-range request overlaps another lock.
 * This function is used to determine where POSIX unlocks should be
 * applied.
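 *
 * Illustrative example (hypothetical ranges): suppose the ofile identified
 * by 'uniqid' also holds a lock on bytes 100-199 and an unlock of bytes
 * 0-299 is being processed.  smb_lock_posix_unlock() then makes three
 * passes over this routine: the first returns B_TRUE with new_mark == 100
 * (bytes 0-99 may be unlocked), the second returns B_FALSE with
 * new_mark == 200 (bytes 100-199 stay locked; restart at 200), and the
 * third returns B_TRUE with new_mark == 0 (bytes 200-299 are the last
 * piece to unlock).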
 *
 * The return code and the value of new_mark must be interpreted as
 * follows:
 *
 * B_TRUE and (new_mark == 0):
 *      This is the last or only lock left to be unlocked
 *
 * B_TRUE and (new_mark > 0):
 *      The range from start to new_mark can be unlocked
 *
 * B_FALSE and (new_mark == 0):
 *      The unlock can't be performed and we are done
 *
 * B_FALSE and (new_mark > 0):
 *      The range from start to new_mark can't be unlocked.
 *      Start should be reset to new_mark for the next pass.
 */
static boolean_t
smb_is_range_unlocked(uint64_t start, uint64_t end, uint32_t uniqid,
    smb_llist_t *llist_head, uint64_t *new_mark)
{
        struct smb_lock *lk = NULL;
        uint64_t        low_water_mark = MAXOFFSET_T;
        uint64_t        lk_start;
        uint64_t        lk_end;

        *new_mark = 0;
        lk = smb_llist_head(llist_head);
        while (lk) {
                if (lk->l_length == 0) {
                        lk = smb_llist_next(llist_head, lk);
                        continue;
                }

                if (lk->l_file->f_uniqid != uniqid) {
                        lk = smb_llist_next(llist_head, lk);
                        continue;
                }

                lk_end = lk->l_start + lk->l_length - 1;
                lk_start = lk->l_start;

                /*
                 * There is no overlap for the first two cases;
                 * check the next node.
                 */
                if (lk_end < start) {
                        lk = smb_llist_next(llist_head, lk);
                        continue;
                }
                if (lk_start > end) {
                        lk = smb_llist_next(llist_head, lk);
                        continue;
                }

                /* this range is completely locked */
                if ((lk_start <= start) && (lk_end >= end)) {
                        return (B_FALSE);
                }

                /* the first part of this range is locked */
                if ((start >= lk_start) && (start <= lk_end)) {
                        if (end > lk_end)
                                *new_mark = lk_end + 1;
                        return (B_FALSE);
                }

                /* this piece is unlocked */
                if ((lk_start >= start) && (lk_start <= end)) {
                        if (low_water_mark > lk_start)
                                low_water_mark = lk_start;
                }

                lk = smb_llist_next(llist_head, lk);
        }

        if (low_water_mark != MAXOFFSET_T) {
                *new_mark = low_water_mark;
                return (B_TRUE);
        }

        /* the range is completely unlocked */
        return (B_TRUE);
}