/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * This module provides range lock functionality for CIFS/SMB clients.
 * The lock range service functions process SMB lock and unlock
 * requests for a file by applying lock rules, and mark the file range
 * as locked if the lock is successful; otherwise they return the
 * proper error code.
 */

#include <smbsrv/smb_kproto.h>
#include <smbsrv/smb_fsops.h>
#include <sys/nbmlock.h>
#include <sys/param.h>

extern caller_context_t smb_ct;

static void smb_lock_posix_unlock(smb_node_t *, smb_lock_t *, cred_t *);
static boolean_t smb_is_range_unlocked(uint64_t, uint64_t, uint32_t,
    smb_llist_t *, uint64_t *);
static int smb_lock_range_overlap(smb_lock_t *, uint64_t, uint64_t);
static uint32_t smb_lock_range_lckrules(smb_request_t *, smb_ofile_t *,
    smb_node_t *, smb_lock_t *, smb_lock_t **);
static clock_t smb_lock_wait(smb_request_t *, smb_lock_t *, smb_lock_t *);
static uint32_t smb_lock_range_ulckrules(smb_request_t *, smb_node_t *,
    uint64_t, uint64_t, smb_lock_t **nodelock);
static smb_lock_t *smb_lock_create(smb_request_t *, uint64_t, uint64_t,
    uint32_t, uint32_t);
static void smb_lock_destroy(smb_lock_t *);
static void smb_lock_free(smb_lock_t *);

/*
 * Return the number of range locks on the specified ofile.
 * Walks node->n_lock_list under the list reader lock counting
 * entries owned by 'of'.
 */
uint32_t
smb_lock_get_lock_count(smb_node_t *node, smb_ofile_t *of)
{
	smb_lock_t	*lock;
	smb_llist_t	*llist;
	uint32_t	count = 0;

	SMB_NODE_VALID(node);
	SMB_OFILE_VALID(of);

	llist = &node->n_lock_list;

	smb_llist_enter(llist, RW_READER);
	for (lock = smb_llist_head(llist);
	    lock != NULL;
	    lock = smb_llist_next(llist, lock)) {
		if (lock->l_file == of)
			++count;
	}
	smb_llist_exit(llist);

	return (count);
}

/*
 * smb_unlock_range
 *
 * Locates the lock range corresponding to an unlock request and,
 * if found, removes and destroys it.
 *
 * NT_STATUS_SUCCESS - Unlock range performed successfully.
 * !NT_STATUS_SUCCESS - Error in unlock range operation.
 */
uint32_t
smb_unlock_range(
    smb_request_t	*sr,
    smb_node_t		*node,
    uint64_t		start,
    uint64_t		length)
{
	smb_lock_t	*lock = NULL;
	uint32_t	status;

	/* Apply unlocking rules */
	smb_llist_enter(&node->n_lock_list, RW_WRITER);
	status = smb_lock_range_ulckrules(sr, node, start, length, &lock);
	if (status != NT_STATUS_SUCCESS) {
		/*
		 * If the lock range doesn't match any entry
		 * in the list, return the error.
		 */
		ASSERT(lock == NULL);
		smb_llist_exit(&node->n_lock_list);
		return (status);
	}

	smb_llist_remove(&node->n_lock_list, lock);
	smb_lock_posix_unlock(node, lock, sr->user_cr);
	smb_llist_exit(&node->n_lock_list);
	smb_lock_destroy(lock);

	return (status);
}

/*
 * smb_lock_range
 *
 * Checks for integrity of file lock operation for the given range of file data.
 * This is performed by applying lock rules with all the elements of the node
 * lock list.
 *
 * Break shared (levelII) oplocks. If there is an exclusive oplock, it is
 * owned by this ofile and therefore should not be broken.
 *
 * The function returns with new lock added if lock request is non-conflicting
 * with existing range lock for the file. Otherwise smb request is filed
 * without returning.
 *
 * NT_STATUS_SUCCESS - Lock range performed successfully.
 * !NT_STATUS_SUCCESS - Error in lock range operation.
 */
uint32_t
smb_lock_range(
    smb_request_t	*sr,
    uint64_t		start,
    uint64_t		length,
    uint32_t		timeout,
    uint32_t		locktype)
{
	smb_ofile_t	*file = sr->fid_ofile;
	smb_node_t	*node = file->f_node;
	smb_lock_t	*lock;
	smb_lock_t	*clock = NULL;
	uint32_t	result = NT_STATUS_SUCCESS;
	/* timeout == 0 is "try once"; UINT_MAX is "wait forever" */
	boolean_t	lock_has_timeout =
	    (timeout != 0 && timeout != UINT_MAX);

	lock = smb_lock_create(sr, start, length, locktype, timeout);

	smb_llist_enter(&node->n_lock_list, RW_WRITER);
	for (;;) {
		clock_t	rc;

		/* Apply locking rules */
		result = smb_lock_range_lckrules(sr, file, node, lock, &clock);

		if ((result == NT_STATUS_CANCELLED) ||
		    (result == NT_STATUS_SUCCESS) ||
		    (result == NT_STATUS_RANGE_NOT_LOCKED)) {
			ASSERT(clock == NULL);
			break;
		} else if (timeout == 0) {
			break;
		}

		ASSERT(result == NT_STATUS_LOCK_NOT_GRANTED);
		ASSERT(clock);
		/*
		 * Call smb_lock_wait holding the write lock for the
		 * node lock list.  smb_lock_wait will release this
		 * lock if it blocks, and re-acquire it before return.
		 */
		ASSERT(node == clock->l_file->f_node);

		rc = smb_lock_wait(sr, lock, clock);
		if (rc == 0) {
			result = NT_STATUS_CANCELLED;
			break;
		}
		if (rc == -1)
			timeout = 0;	/* timed out; retry rules once more */

		clock = NULL;
	}

	lock->l_blocked_by = NULL;

	if (result != NT_STATUS_SUCCESS) {
		/*
		 * Under certain conditions NT_STATUS_FILE_LOCK_CONFLICT
		 * should be returned instead of NT_STATUS_LOCK_NOT_GRANTED.
		 * All of this appears to be specific to SMB1.
		 */
		if (sr->session->dialect <= NT_LM_0_12 &&
		    result == NT_STATUS_LOCK_NOT_GRANTED) {
			/*
			 * Locks with timeouts always return
			 * NT_STATUS_FILE_LOCK_CONFLICT
			 */
			if (lock_has_timeout)
				result = NT_STATUS_FILE_LOCK_CONFLICT;

			/*
			 * Locks starting higher than 0xef000000 that do not
			 * have the MSB set always return
			 * NT_STATUS_FILE_LOCK_CONFLICT
			 */
			if ((lock->l_start >= 0xef000000) &&
			    !(lock->l_start & (1ULL << 63))) {
				result = NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/*
			 * If the last lock attempt to fail on this file handle
			 * started at the same offset as this one then return
			 * NT_STATUS_FILE_LOCK_CONFLICT
			 */
			mutex_enter(&file->f_mutex);
			if ((file->f_flags & SMB_OFLAGS_LLF_POS_VALID) &&
			    (lock->l_start == file->f_llf_pos)) {
				result = NT_STATUS_FILE_LOCK_CONFLICT;
			}
			mutex_exit(&file->f_mutex);
		}

		/* Update last lock failed offset */
		mutex_enter(&file->f_mutex);
		file->f_llf_pos = lock->l_start;
		file->f_flags |= SMB_OFLAGS_LLF_POS_VALID;
		mutex_exit(&file->f_mutex);

		smb_lock_free(lock);
	} else {
		/*
		 * Don't insert into the CIFS lock list unless the
		 * POSIX lock worked.
		 */
		if (smb_fsop_frlock(node, lock, B_FALSE, sr->user_cr))
			result = NT_STATUS_FILE_LOCK_CONFLICT;
		else
			smb_llist_insert_tail(&node->n_lock_list, lock);
	}
	smb_llist_exit(&node->n_lock_list);

	if (result == NT_STATUS_SUCCESS)
		smb_oplock_break_levelII(node);

	return (result);
}


/*
 * smb_lock_range_access
 *
 * Scans the node lock list to check if there is any overlapping lock.
 * An overlapping lock is allowed only under the same session and
 * client pid.
 *
 * Return values
 *	NT_STATUS_SUCCESS		lock access granted.
 *	NT_STATUS_FILE_LOCK_CONFLICT	access denied due to lock conflict.
 */
int
smb_lock_range_access(
    smb_request_t	*sr,
    smb_node_t		*node,
    uint64_t		start,
    uint64_t		length,		/* zero means to EoF */
    boolean_t		will_write)
{
	smb_lock_t	*lock;
	smb_llist_t	*llist;
	int		status = NT_STATUS_SUCCESS;

	llist = &node->n_lock_list;
	smb_llist_enter(llist, RW_READER);
	/* Search for any applicable lock */
	for (lock = smb_llist_head(llist);
	    lock != NULL;
	    lock = smb_llist_next(llist, lock)) {

		if (!smb_lock_range_overlap(lock, start, length))
			/* Lock does not overlap */
			continue;

		/* Read locks never block a read */
		if (lock->l_type == SMB_LOCK_TYPE_READONLY && !will_write)
			continue;

		/* Our own session+pid may access through its own lock */
		if (lock->l_type == SMB_LOCK_TYPE_READWRITE &&
		    lock->l_session_kid == sr->session->s_kid &&
		    lock->l_pid == sr->smb_pid)
			continue;

		status = NT_STATUS_FILE_LOCK_CONFLICT;
		break;
	}
	smb_llist_exit(llist);
	return (status);
}

/*
 * Destroy all range locks held via the given ofile.
 * Called when the ofile is being closed/destroyed.
 */
void
smb_node_destroy_lock_by_ofile(smb_node_t *node, smb_ofile_t *file)
{
	smb_lock_t	*lock;
	smb_lock_t	*nxtl;
	list_t		destroy_list;

	SMB_NODE_VALID(node);
	ASSERT(node->n_refcnt);

	/*
	 * Move locks matching the specified file from the node->n_lock_list
	 * to a temporary list (holding the lock the entire time) then
	 * destroy all the matching locks.  We can't call smb_lock_destroy
	 * while we are holding the lock for node->n_lock_list because we will
	 * deadlock and we can't drop the lock because the list contents might
	 * change (for example nxtl might get removed on another thread).
	 */
	list_create(&destroy_list, sizeof (smb_lock_t),
	    offsetof(smb_lock_t, l_lnd));

	smb_llist_enter(&node->n_lock_list, RW_WRITER);
	lock = smb_llist_head(&node->n_lock_list);
	while (lock) {
		nxtl = smb_llist_next(&node->n_lock_list, lock);
		if (lock->l_file == file) {
			smb_llist_remove(&node->n_lock_list, lock);
			smb_lock_posix_unlock(node, lock, file->f_user->u_cred);
			list_insert_tail(&destroy_list, lock);
		}
		lock = nxtl;
	}
	smb_llist_exit(&node->n_lock_list);

	/* Now safe to destroy; list lock is no longer held. */
	lock = list_head(&destroy_list);
	while (lock) {
		nxtl = list_next(&destroy_list, lock);
		list_remove(&destroy_list, lock);
		smb_lock_destroy(lock);
		lock = nxtl;
	}

	list_destroy(&destroy_list);
}

/*
 * Map a lock-range NT status to the matching SMB1 DOS error
 * and record both on the request.
 */
void
smb_lock_range_error(smb_request_t *sr, uint32_t status32)
{
	uint16_t errcode;

	if (status32 == NT_STATUS_CANCELLED)
		errcode = ERROR_OPERATION_ABORTED;
	else
		errcode = ERRlock;

	smbsr_error(sr, status32, ERRDOS, errcode);
}

/*
 * An SMB variant of nbl_conflict().
 *
 * SMB prevents remove or rename when conflicting locks exist
 * (unlike NFS, which is why we can't just use nbl_conflict).
 *
 * Returns:
 *	NT_STATUS_SHARING_VIOLATION - nbl_share_conflict
 *	NT_STATUS_FILE_LOCK_CONFLICT - nbl_lock_conflict
 *	NT_STATUS_SUCCESS - operation can proceed
 *
 * NB: This function used to also check the list of ofiles,
 * via: smb_lock_range_access() but we _can't_ do that here
 * due to lock order constraints between node->n_lock_list
 * and node->vp->vnbllock (taken via nvl_start_crit).
 * They must be taken in that order, and in here, we
 * already hold vp->vnbllock.
 */
DWORD
smb_nbl_conflict(smb_node_t *node, uint64_t off, uint64_t len, nbl_op_t op)
{
	int svmand;

	SMB_NODE_VALID(node);
	ASSERT(smb_node_in_crit(node));
	ASSERT(op == NBL_READ || op == NBL_WRITE || op == NBL_READWRITE ||
	    op == NBL_REMOVE || op == NBL_RENAME);

	if (smb_node_is_dir(node))
		return (NT_STATUS_SUCCESS);

	if (nbl_share_conflict(node->vp, op, &smb_ct))
		return (NT_STATUS_SHARING_VIOLATION);

	/*
	 * When checking for lock conflicts, rename and remove
	 * are not allowed, so treat those as read/write.
	 */
	if (op == NBL_RENAME || op == NBL_REMOVE)
		op = NBL_READWRITE;

	if (nbl_svmand(node->vp, zone_kcred(), &svmand))
		svmand = 1;

	if (nbl_lock_conflict(node->vp, op, off, len, svmand, &smb_ct))
		return (NT_STATUS_FILE_LOCK_CONFLICT);

	return (NT_STATUS_SUCCESS);
}

/*
 * smb_lock_posix_unlock
 *
 * Checks if the current unlock request is inside another lock and
 * repeatedly calls smb_is_range_unlocked on a sliding basis to unlock
 * all portions of the range that are not covered by other locks.
 */
static void
smb_lock_posix_unlock(smb_node_t *node, smb_lock_t *lock, cred_t *cr)
{
	uint64_t	new_mark;
	uint64_t	unlock_start;
	uint64_t	unlock_end;
	smb_lock_t	new_unlock;
	smb_llist_t	*llist;
	boolean_t	can_unlock;

	new_mark = 0;
	unlock_start = lock->l_start;
	unlock_end = unlock_start + lock->l_length;
	llist = &node->n_lock_list;

	for (;;) {
		can_unlock = smb_is_range_unlocked(unlock_start, unlock_end,
		    lock->l_file->f_uniqid, llist, &new_mark);
		if (can_unlock) {
			if (new_mark) {
				/* Unlock the piece up to new_mark, advance. */
				new_unlock = *lock;
				new_unlock.l_start = unlock_start;
				new_unlock.l_length = new_mark - unlock_start;
				(void) smb_fsop_frlock(node, &new_unlock,
				    B_TRUE, cr);
				unlock_start = new_mark;
			} else {
				/* Last piece; unlock it and stop. */
				new_unlock = *lock;
				new_unlock.l_start = unlock_start;
				new_unlock.l_length = unlock_end - unlock_start;
				(void) smb_fsop_frlock(node, &new_unlock,
				    B_TRUE, cr);
				break;
			}
		} else if (new_mark) {
			/* This piece is still locked; skip past it. */
			unlock_start = new_mark;
		} else {
			break;
		}
	}
}

/*
 * smb_lock_range_overlap
 *
 * Checks if lock range(start, length) overlaps range in lock structure.
 *
 * Zero-length byte range locks actually affect no single byte of the stream,
 * meaning they can still be accessed even with such locks in place. However,
 * they do conflict with other ranges in the following manner:
 *	conflict will only exist if the positive-length range contains the
 *	zero-length range's offset but doesn't start at it
 *
 * return values:
 *	0 - Lock range doesn't overlap
 *	1 - Lock range overlaps.
 */

#define	RANGE_NO_OVERLAP	0
#define	RANGE_OVERLAP		1

static int
smb_lock_range_overlap(struct smb_lock *lock, uint64_t start, uint64_t length)
{
	if (length == 0) {
		if ((lock->l_start < start) &&
		    ((lock->l_start + lock->l_length) > start))
			return (RANGE_OVERLAP);

		return (RANGE_NO_OVERLAP);
	}

	/* The following test is intended to catch roll over locks. */
	if ((start == lock->l_start) && (length == lock->l_length))
		return (RANGE_OVERLAP);

	if (start < lock->l_start) {
		if (start + length > lock->l_start)
			return (RANGE_OVERLAP);
	} else if (start < lock->l_start + lock->l_length)
		return (RANGE_OVERLAP);

	return (RANGE_NO_OVERLAP);
}

/*
 * smb_lock_range_lckrules
 *
 * Lock range rules:
 *	1. Overlapping read locks are allowed if the
 *	current locks in the region are only read locks
 *	irrespective of pid of smb client issuing lock request.
 *
 *	2. Read lock in the overlapped region of write lock
 *	are allowed if the previous lock is performed by the
 *	same pid and connection.
 *
 * return status:
 *	NT_STATUS_SUCCESS - Input lock range adapts to lock rules.
 *	NT_STATUS_LOCK_NOT_GRANTED - Input lock conflicts lock rules.
 *	NT_STATUS_CANCELLED - Error in processing lock rules
 */
static uint32_t
smb_lock_range_lckrules(
    smb_request_t	*sr,
    smb_ofile_t		*file,
    smb_node_t		*node,
    smb_lock_t		*dlock,
    smb_lock_t		**clockp)
{
	smb_lock_t	*lock;
	uint32_t	status = NT_STATUS_SUCCESS;

	/* Check if file is closed */
	if (!smb_ofile_is_open(file)) {
		return (NT_STATUS_RANGE_NOT_LOCKED);
	}

	/* Caller must hold lock for node->n_lock_list */
	for (lock = smb_llist_head(&node->n_lock_list);
	    lock != NULL;
	    lock = smb_llist_next(&node->n_lock_list, lock)) {

		if (!smb_lock_range_overlap(lock, dlock->l_start,
		    dlock->l_length))
			continue;

		/*
		 * Check to see if lock in the overlapping record
		 * is only read lock. Current finding is read
		 * locks can overlapped irrespective of pids.
		 */
		if ((lock->l_type == SMB_LOCK_TYPE_READONLY) &&
		    (dlock->l_type == SMB_LOCK_TYPE_READONLY)) {
			continue;
		}

		/*
		 * When the read lock overlaps write lock, check if
		 * allowed.
		 */
		if ((dlock->l_type == SMB_LOCK_TYPE_READONLY) &&
		    !(lock->l_type == SMB_LOCK_TYPE_READONLY)) {
			if (lock->l_file == sr->fid_ofile &&
			    lock->l_session_kid == sr->session->s_kid &&
			    lock->l_pid == sr->smb_pid &&
			    lock->l_uid == sr->smb_uid) {
				continue;
			}
		}

		/* Conflict in overlapping lock element */
		*clockp = lock;
		status = NT_STATUS_LOCK_NOT_GRANTED;
		break;
	}

	return (status);
}

/*
 * Cancel method for smb_lock_wait()
 *
 * This request is waiting on a lock.  Wakeup everything
 * waiting on the lock so that the relevant thread regains
 * control and notices that it has been cancelled.
 * The other lock request threads waiting on this lock will go
 * back to sleep when they discover they are still blocked.
 */
static void
smb_lock_cancel(smb_request_t *sr)
{
	smb_lock_t *lock = sr->cancel_arg2;

	ASSERT(lock != NULL);
	cv_broadcast(&lock->l_cv);
}

/*
 * smb_lock_wait
 *
 * Wait operation for smb overlapping lock to be released.  Caller must hold
 * write lock for node->n_lock_list so that the set of active locks can't
 * change unexpectedly.  The lock for node->n_lock_list will be released
 * within this function during the sleep after the lock dependency has
 * been recorded.
 *
 * return value
 *
 *	0	The request was cancelled.
 *	-1	The timeout was reached.
 *	>0	Condition met.
 */
static clock_t
smb_lock_wait(smb_request_t *sr, smb_lock_t *b_lock, smb_lock_t *c_lock)
{
	clock_t	rc = 0;

	mutex_enter(&sr->sr_mutex);
	if (sr->sr_state != SMB_REQ_STATE_ACTIVE) {
		mutex_exit(&sr->sr_mutex);
		return (0); /* cancelled */
	}

	/*
	 * Wait up till the timeout time keeping track of actual
	 * time waited for possible retry failure.
	 */
	sr->sr_state = SMB_REQ_STATE_WAITING_LOCK;
	sr->cancel_method = smb_lock_cancel;
	sr->cancel_arg2 = c_lock;
	mutex_exit(&sr->sr_mutex);

	mutex_enter(&c_lock->l_mutex);
	/*
	 * The conflict list (l_conflict_list) for a lock contains
	 * all the locks that are blocked by and in conflict with
	 * that lock.  Add the new lock to the conflict list for the
	 * active lock.
	 *
	 * l_conflict_list is currently a fancy way of representing
	 * the references/dependencies on a lock.  It could be
	 * replaced with a reference count but this approach
	 * has the advantage that MDB can display the lock
	 * dependencies at any point in time.  In the future
	 * we should be able to leverage the list to implement
	 * an asynchronous locking model.
	 *
	 * l_blocked_by is the reverse of the conflict list.  It
	 * points to the lock that the new lock conflicts with.
	 * As currently implemented this value is purely for
	 * debug purposes -- there are windows of time when
	 * l_blocked_by may be non-NULL even though there is no
	 * conflict list
	 */
	b_lock->l_blocked_by = c_lock;
	smb_slist_insert_tail(&c_lock->l_conflict_list, b_lock);
	smb_llist_exit(&c_lock->l_file->f_node->n_lock_list);

	if (SMB_LOCK_INDEFINITE_WAIT(b_lock)) {
		cv_wait(&c_lock->l_cv, &c_lock->l_mutex);
	} else {
		rc = cv_timedwait(&c_lock->l_cv,
		    &c_lock->l_mutex, b_lock->l_end_time);
	}

	mutex_exit(&c_lock->l_mutex);

	/* Re-acquire the list lock dropped above before sleeping. */
	smb_llist_enter(&c_lock->l_file->f_node->n_lock_list, RW_WRITER);
	smb_slist_remove(&c_lock->l_conflict_list, b_lock);

	mutex_enter(&sr->sr_mutex);
	sr->cancel_method = NULL;
	sr->cancel_arg2 = NULL;

	switch (sr->sr_state) {
	case SMB_REQ_STATE_WAITING_LOCK:
		/* normal wakeup, rc from above */
		sr->sr_state = SMB_REQ_STATE_ACTIVE;
		break;

	case SMB_REQ_STATE_CANCEL_PENDING:
		/* Cancelled via smb_lock_cancel */
		sr->sr_state = SMB_REQ_STATE_CANCELLED;
		rc = 0;
		break;

	case SMB_REQ_STATE_CANCELLED:
		/* Cancelled before this function ran. */
		rc = 0;
		break;

	default:
		rc = 0;
		break;
	}
	mutex_exit(&sr->sr_mutex);

	return (rc);
}

/*
 * smb_lock_range_ulckrules
 *
 *	1. Unlock should be performed at exactly matching ends.
 *	This has been changed because overlapping ends is
 *	allowed and there is no other precise way of locating
 *	lock entity in node lock list.
 *
 *	2. Unlock fails if no corresponding lock exists.
 *
 * Return values
 *
 *	NT_STATUS_SUCCESS		Unlock request matches lock record
 *					pointed by 'nodelock' lock structure.
 *
 *	NT_STATUS_RANGE_NOT_LOCKED	Unlock request doesn't match any
 *					lock record in the node lock list, or
 *					error in unlock range processing.
 */
static uint32_t
smb_lock_range_ulckrules(
    smb_request_t	*sr,
    smb_node_t		*node,
    uint64_t		start,
    uint64_t		length,
    smb_lock_t		**nodelock)
{
	smb_lock_t	*lock;
	uint32_t	status = NT_STATUS_RANGE_NOT_LOCKED;

	/* Caller must hold lock for node->n_lock_list */
	for (lock = smb_llist_head(&node->n_lock_list);
	    lock != NULL;
	    lock = smb_llist_next(&node->n_lock_list, lock)) {

		/* Match must be exact: same range and same owner. */
		if ((start == lock->l_start) &&
		    (length == lock->l_length) &&
		    lock->l_file == sr->fid_ofile &&
		    lock->l_session_kid == sr->session->s_kid &&
		    lock->l_pid == sr->smb_pid &&
		    lock->l_uid == sr->smb_uid) {
			*nodelock = lock;
			status = NT_STATUS_SUCCESS;
			break;
		}
	}

	return (status);
}

/*
 * Allocate and initialize a new lock object describing the
 * requested range, owner and timeout.
 */
static smb_lock_t *
smb_lock_create(
    smb_request_t *sr,
    uint64_t start,
    uint64_t length,
    uint32_t locktype,
    uint32_t timeout)
{
	smb_lock_t *lock;

	ASSERT(locktype == SMB_LOCK_TYPE_READWRITE ||
	    locktype == SMB_LOCK_TYPE_READONLY);

	lock = kmem_zalloc(sizeof (smb_lock_t), KM_SLEEP);
	lock->l_magic = SMB_LOCK_MAGIC;
	lock->l_sr = sr; /* Invalid after lock is active */
	lock->l_session_kid = sr->session->s_kid;
	lock->l_session = sr->session;
	lock->l_file = sr->fid_ofile;
	lock->l_uid = sr->smb_uid;
	lock->l_pid = sr->smb_pid;
	lock->l_type = locktype;
	lock->l_start = start;
	lock->l_length = length;
	/*
	 * Calculate the absolute end time so that we can use it
	 * in cv_timedwait.
	 */
	lock->l_end_time = ddi_get_lbolt() + MSEC_TO_TICK(timeout);
	if (timeout == UINT_MAX)
		lock->l_flags |= SMB_LOCK_FLAG_INDEFINITE;

	mutex_init(&lock->l_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&lock->l_cv, NULL, CV_DEFAULT, NULL);
	smb_slist_constructor(&lock->l_conflict_list, sizeof (smb_lock_t),
	    offsetof(smb_lock_t, l_conflict_lnd));

	return (lock);
}

/*
 * Release the resources and memory of a lock object.
 * The lock must already be off every list.
 */
static void
smb_lock_free(smb_lock_t *lock)
{
	smb_slist_destructor(&lock->l_conflict_list);
	cv_destroy(&lock->l_cv);
	mutex_destroy(&lock->l_mutex);

	kmem_free(lock, sizeof (smb_lock_t));
}

/*
 * smb_lock_destroy
 *
 * Caller must hold node->n_lock_list
 */
static void
smb_lock_destroy(smb_lock_t *lock)
{
	/*
	 * Caller must hold node->n_lock_list lock.
	 */
	mutex_enter(&lock->l_mutex);
	cv_broadcast(&lock->l_cv);
	mutex_exit(&lock->l_mutex);

	/*
	 * The cv_broadcast above should wake up any locks that previously
	 * had conflicts with this lock.  Wait for the locking threads
	 * to remove their references to this lock.
	 */
	smb_slist_wait_for_empty(&lock->l_conflict_list);

	smb_lock_free(lock);
}

/*
 * smb_is_range_unlocked
 *
 * Checks if the current unlock byte range request overlaps another lock.
 * This function is used to determine where POSIX unlocks should be
 * applied.
 *
 * The return code and the value of new_mark must be interpreted as
 * follows:
 *
 * B_TRUE and (new_mark == 0):
 *   This is the last or only lock left to be unlocked
 *
 * B_TRUE and (new_mark > 0):
 *   The range from start to new_mark can be unlocked
 *
 * B_FALSE and (new_mark == 0):
 *   The unlock can't be performed and we are done
 *
 * B_FALSE and (new_mark > 0),
 *   The range from start to new_mark can't be unlocked
 *   Start should be reset to new_mark for the next pass
 */
static boolean_t
smb_is_range_unlocked(uint64_t start, uint64_t end, uint32_t uniqid,
    smb_llist_t *llist_head, uint64_t *new_mark)
{
	struct smb_lock *lk = NULL;
	uint64_t low_water_mark = MAXOFFSET_T;
	uint64_t lk_start;
	uint64_t lk_end;

	*new_mark = 0;
	lk = smb_llist_head(llist_head);
	while (lk) {
		/* Zero-length locks cover no bytes; ignore them. */
		if (lk->l_length == 0) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}

		/* Only locks held via the same ofile matter here. */
		if (lk->l_file->f_uniqid != uniqid) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}

		lk_end = lk->l_start + lk->l_length - 1;
		lk_start = lk->l_start;

		/*
		 * there is no overlap for the first 2 cases
		 * check next node
		 */
		if (lk_end < start) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}
		if (lk_start > end) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}

		/* this range is completely locked */
		if ((lk_start <= start) && (lk_end >= end)) {
			return (B_FALSE);
		}

		/* the first part of this range is locked */
		if ((start >= lk_start) && (start <= lk_end)) {
			if (end > lk_end)
				*new_mark = lk_end + 1;
			return (B_FALSE);
		}

		/* this piece is unlocked */
		if ((lk_start >= start) && (lk_start <= end)) {
			if (low_water_mark > lk_start)
				low_water_mark = lk_start;
		}

		lk = smb_llist_next(llist_head, lk);
	}

	if (low_water_mark != MAXOFFSET_T) {
		*new_mark = low_water_mark;
		return (B_TRUE);
	}
	/* the range is completely unlocked */
	return (B_TRUE);
}