/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * This module provides range lock functionality for CIFS/SMB clients.
 * The lock range service functions process SMB lock and unlock
 * requests for a file by applying the lock rules, mark the file range
 * as locked if the lock succeeds, and otherwise return the appropriate
 * error code.
 */

#include <smbsrv/smb_kproto.h>
#include <smbsrv/smb_fsops.h>
#include <sys/nbmlock.h>
#include <sys/param.h>

extern caller_context_t smb_ct;

static void smb_lock_posix_unlock(smb_node_t *, smb_lock_t *, cred_t *);
static boolean_t smb_is_range_unlocked(uint64_t, uint64_t, uint32_t,
    smb_llist_t *, uint64_t *);
static int smb_lock_range_overlap(smb_lock_t *, uint64_t, uint64_t);
static uint32_t smb_lock_range_lckrules(smb_request_t *, smb_ofile_t *,
    smb_node_t *, smb_lock_t *, smb_lock_t **);
static clock_t smb_lock_wait(smb_request_t *, smb_lock_t *, smb_lock_t *);
static uint32_t smb_lock_range_ulckrules(smb_request_t *, smb_node_t *,
    uint64_t, uint64_t, smb_lock_t **nodelock);
static smb_lock_t *smb_lock_create(smb_request_t *, uint64_t, uint64_t,
    uint32_t, uint32_t);
static void smb_lock_destroy(smb_lock_t *);
static void smb_lock_free(smb_lock_t *);

/*
 * Return the number of range locks on the specified ofile.
 */
uint32_t
smb_lock_get_lock_count(smb_node_t *node, smb_ofile_t *of)
{
        smb_lock_t *lock;
        smb_llist_t *llist;
        uint32_t count = 0;

        SMB_NODE_VALID(node);
        SMB_OFILE_VALID(of);

        llist = &node->n_lock_list;

        smb_llist_enter(llist, RW_READER);
        for (lock = smb_llist_head(llist);
            lock != NULL;
            lock = smb_llist_next(llist, lock)) {
                if (lock->l_file == of)
                        ++count;
        }
        smb_llist_exit(llist);

        return (count);
}

/*
 * smb_unlock_range
 *
 * Locates the lock range corresponding to the unlock request and removes it.
 *
 * NT_STATUS_SUCCESS - Unlock range performed successfully.
 * !NT_STATUS_SUCCESS - Error in unlock range operation.
 */
uint32_t
smb_unlock_range(
    smb_request_t *sr,
    smb_node_t *node,
    uint64_t start,
    uint64_t length)
{
        smb_lock_t *lock = NULL;
        uint32_t status;

        /* Apply unlocking rules */
        smb_llist_enter(&node->n_lock_list, RW_WRITER);
        status = smb_lock_range_ulckrules(sr, node, start, length, &lock);
        if (status != NT_STATUS_SUCCESS) {
                /*
                 * If the lock range does not match any entry
                 * in the list, return an error.
                 */
                ASSERT(lock == NULL);
                smb_llist_exit(&node->n_lock_list);
                return (status);
        }

        smb_llist_remove(&node->n_lock_list, lock);
        smb_lock_posix_unlock(node, lock, sr->user_cr);
        smb_llist_exit(&node->n_lock_list);
        smb_lock_destroy(lock);

        return (status);
}

/*
 * smb_lock_range
 *
 * Checks the integrity of the file lock operation for the given range of file
 * data. This is performed by applying the lock rules against all the elements
 * of the node lock list.
 *
 * Breaks shared (levelII) oplocks. If there is an exclusive oplock, it is
 * owned by this ofile and therefore should not be broken.
 *
 * The function returns with the new lock added if the lock request does not
 * conflict with an existing range lock on the file. Otherwise, if a timeout
 * was specified, the request blocks waiting for the conflicting lock to be
 * released before the lock rules are applied again.
 *
 * NT_STATUS_SUCCESS - Lock range performed successfully.
 * !NT_STATUS_SUCCESS - Error in lock range operation.
 */
uint32_t
smb_lock_range(
    smb_request_t *sr,
    uint64_t start,
    uint64_t length,
    uint32_t timeout,
    uint32_t locktype)
{
        smb_ofile_t *file = sr->fid_ofile;
        smb_node_t *node = file->f_node;
        smb_lock_t *lock;
        smb_lock_t *clock = NULL;
        uint32_t result = NT_STATUS_SUCCESS;
        boolean_t lock_has_timeout = (timeout != 0);

        lock = smb_lock_create(sr, start, length, locktype, timeout);

        smb_llist_enter(&node->n_lock_list, RW_WRITER);
        for (;;) {
                clock_t rc;

                /* Apply locking rules */
                result = smb_lock_range_lckrules(sr, file, node, lock, &clock);

                if ((result == NT_STATUS_CANCELLED) ||
                    (result == NT_STATUS_SUCCESS) ||
                    (result == NT_STATUS_RANGE_NOT_LOCKED)) {
                        ASSERT(clock == NULL);
                        break;
                } else if (timeout == 0) {
                        break;
                }

                ASSERT(result == NT_STATUS_LOCK_NOT_GRANTED);
                ASSERT(clock);
                /*
                 * Call smb_lock_wait holding write lock for
                 * node lock list. smb_lock_wait will release
                 * this lock if it blocks.
                 */
                ASSERT(node == clock->l_file->f_node);

                rc = smb_lock_wait(sr, lock, clock);
                if (rc == 0) {
                        result = NT_STATUS_CANCELLED;
                        break;
                }
                if (rc == -1)
                        timeout = 0;

                clock = NULL;
        }

        lock->l_blocked_by = NULL;

        if (result != NT_STATUS_SUCCESS) {
                /*
                 * Under certain conditions NT_STATUS_FILE_LOCK_CONFLICT
                 * should be returned instead of NT_STATUS_LOCK_NOT_GRANTED.
                 */
                if (result == NT_STATUS_LOCK_NOT_GRANTED) {
                        /*
                         * Locks with timeouts always return
                         * NT_STATUS_FILE_LOCK_CONFLICT
                         */
                        if (lock_has_timeout)
                                result = NT_STATUS_FILE_LOCK_CONFLICT;

                        /*
                         * Locks starting higher than 0xef000000 that do not
                         * have the MSB set always return
                         * NT_STATUS_FILE_LOCK_CONFLICT
                         */
                        if ((lock->l_start >= 0xef000000) &&
                            !(lock->l_start & (1ULL << 63))) {
                                result = NT_STATUS_FILE_LOCK_CONFLICT;
                        }

                        /*
                         * If the last lock attempt to fail on this file handle
                         * started at the same offset as this one then return
                         * NT_STATUS_FILE_LOCK_CONFLICT
                         */
                        mutex_enter(&file->f_mutex);
                        if ((file->f_flags & SMB_OFLAGS_LLF_POS_VALID) &&
                            (lock->l_start == file->f_llf_pos)) {
                                result = NT_STATUS_FILE_LOCK_CONFLICT;
                        }
                        mutex_exit(&file->f_mutex);
                }

                /* Update last lock failed offset */
                mutex_enter(&file->f_mutex);
                file->f_llf_pos = lock->l_start;
                file->f_flags |= SMB_OFLAGS_LLF_POS_VALID;
                mutex_exit(&file->f_mutex);

                smb_lock_free(lock);
        } else {
                /*
                 * Don't insert into the CIFS lock list unless the
                 * POSIX lock worked.
                 */
                if (smb_fsop_frlock(node, lock, B_FALSE, sr->user_cr))
                        result = NT_STATUS_FILE_LOCK_CONFLICT;
                else
                        smb_llist_insert_tail(&node->n_lock_list, lock);
        }
        smb_llist_exit(&node->n_lock_list);

        if (result == NT_STATUS_SUCCESS)
                smb_oplock_break_levelII(node);

        return (result);
}


/*
 * smb_lock_range_access
 *
 * Scans the node lock list to check for any overlapping lock.
 * An overlapping lock is allowed only when held by the same
 * session and client pid.
 *
 * Return values
 *	NT_STATUS_SUCCESS		lock access granted.
 *	NT_STATUS_FILE_LOCK_CONFLICT	access denied due to lock conflict.
 */
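/*
 * Illustrative note (added; not part of the original comment): a read of
 * a range covered only by SMB_LOCK_TYPE_READONLY locks is granted no
 * matter which client holds them, while a write into a range covered by
 * an SMB_LOCK_TYPE_READWRITE lock is granted only when the request comes
 * from the same session kid and SMB pid that own that lock; any other
 * overlap yields NT_STATUS_FILE_LOCK_CONFLICT.
 */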
int
smb_lock_range_access(
    smb_request_t *sr,
    smb_node_t *node,
    uint64_t start,
    uint64_t length, /* zero means to EoF */
    boolean_t will_write)
{
        smb_lock_t *lock;
        smb_llist_t *llist;
        int status = NT_STATUS_SUCCESS;

        llist = &node->n_lock_list;
        smb_llist_enter(llist, RW_READER);
        /* Search for any applicable lock */
        for (lock = smb_llist_head(llist);
            lock != NULL;
            lock = smb_llist_next(llist, lock)) {

                if (!smb_lock_range_overlap(lock, start, length))
                        /* Lock does not overlap */
                        continue;

                if (lock->l_type == SMB_LOCK_TYPE_READONLY && !will_write)
                        continue;

                if (lock->l_type == SMB_LOCK_TYPE_READWRITE &&
                    lock->l_session_kid == sr->session->s_kid &&
                    lock->l_pid == sr->smb_pid)
                        continue;

                status = NT_STATUS_FILE_LOCK_CONFLICT;
                break;
        }
        smb_llist_exit(llist);
        return (status);
}

void
smb_node_destroy_lock_by_ofile(smb_node_t *node, smb_ofile_t *file)
{
        smb_lock_t *lock;
        smb_lock_t *nxtl;
        list_t destroy_list;

        SMB_NODE_VALID(node);
        ASSERT(node->n_refcnt);

        /*
         * Move locks matching the specified file from the node->n_lock_list
         * to a temporary list (holding the lock the entire time) then
         * destroy all the matching locks. We can't call smb_lock_destroy
         * while we are holding the lock for node->n_lock_list because we will
         * deadlock and we can't drop the lock because the list contents might
         * change (for example nxtl might get removed on another thread).
         */
        list_create(&destroy_list, sizeof (smb_lock_t),
            offsetof(smb_lock_t, l_lnd));

        smb_llist_enter(&node->n_lock_list, RW_WRITER);
        lock = smb_llist_head(&node->n_lock_list);
        while (lock) {
                nxtl = smb_llist_next(&node->n_lock_list, lock);
                if (lock->l_file == file) {
                        smb_llist_remove(&node->n_lock_list, lock);
                        smb_lock_posix_unlock(node, lock, file->f_user->u_cred);
                        list_insert_tail(&destroy_list, lock);
                }
                lock = nxtl;
        }
        smb_llist_exit(&node->n_lock_list);

        lock = list_head(&destroy_list);
        while (lock) {
                nxtl = list_next(&destroy_list, lock);
                list_remove(&destroy_list, lock);
                smb_lock_destroy(lock);
                lock = nxtl;
        }

        list_destroy(&destroy_list);
}

void
smb_lock_range_error(smb_request_t *sr, uint32_t status32)
{
        uint16_t errcode;

        if (status32 == NT_STATUS_CANCELLED)
                errcode = ERROR_OPERATION_ABORTED;
        else
                errcode = ERRlock;

        smbsr_error(sr, status32, ERRDOS, errcode);
}

/*
 * An SMB variant of nbl_conflict().
 *
 * SMB prevents remove or rename when conflicting locks exist
 * (unlike NFS, which is why we can't just use nbl_conflict).
 *
 * Returns:
 *	NT_STATUS_SHARING_VIOLATION - nbl_share_conflict
 *	NT_STATUS_FILE_LOCK_CONFLICT - nbl_lock_conflict
 *	NT_STATUS_SUCCESS - operation can proceed
 *
 * NB: This function used to also check the list of ofiles,
 * via: smb_lock_range_access() but we _can't_ do that here
 * due to lock order constraints between node->n_lock_list
 * and node->vp->vnbllock (taken via nbl_start_crit).
 * They must be taken in that order, and in here, we
 * already hold vp->vnbllock.
 */
DWORD
smb_nbl_conflict(smb_node_t *node, uint64_t off, uint64_t len, nbl_op_t op)
{
        int svmand;

        SMB_NODE_VALID(node);
        ASSERT(smb_node_in_crit(node));
        ASSERT(op == NBL_READ || op == NBL_WRITE || op == NBL_READWRITE ||
            op == NBL_REMOVE || op == NBL_RENAME);

        if (smb_node_is_dir(node))
                return (NT_STATUS_SUCCESS);

        if (nbl_share_conflict(node->vp, op, &smb_ct))
                return (NT_STATUS_SHARING_VIOLATION);

        /*
         * When checking for lock conflicts, rename and remove
         * are not allowed, so treat those as read/write.
         */
        if (op == NBL_RENAME || op == NBL_REMOVE)
                op = NBL_READWRITE;

        if (nbl_svmand(node->vp, zone_kcred(), &svmand))
                svmand = 1;

        if (nbl_lock_conflict(node->vp, op, off, len, svmand, &smb_ct))
                return (NT_STATUS_FILE_LOCK_CONFLICT);

        return (NT_STATUS_SUCCESS);
}

/*
 * smb_lock_posix_unlock
 *
 * Checks whether the current unlock request overlaps other locks, and
 * repeatedly calls smb_is_range_unlocked on a sliding basis to release
 * every part of the range that is not covered by another lock.
 */
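/*
 * Sketch of the loop below (added; not part of the original comment):
 * only the pieces of the unlock range that are not covered by some other
 * lock on the same ofile (f_uniqid) are handed to smb_fsop_frlock() as
 * POSIX unlocks; covered pieces are skipped by advancing unlock_start to
 * the new_mark returned by smb_is_range_unlocked().
 */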
static void
smb_lock_posix_unlock(smb_node_t *node, smb_lock_t *lock, cred_t *cr)
{
        uint64_t new_mark;
        uint64_t unlock_start;
        uint64_t unlock_end;
        smb_lock_t new_unlock;
        smb_llist_t *llist;
        boolean_t can_unlock;

        new_mark = 0;
        unlock_start = lock->l_start;
        unlock_end = unlock_start + lock->l_length;
        llist = &node->n_lock_list;

        for (;;) {
                can_unlock = smb_is_range_unlocked(unlock_start, unlock_end,
                    lock->l_file->f_uniqid, llist, &new_mark);
                if (can_unlock) {
                        if (new_mark) {
                                new_unlock = *lock;
                                new_unlock.l_start = unlock_start;
                                new_unlock.l_length = new_mark - unlock_start;
                                (void) smb_fsop_frlock(node, &new_unlock,
                                    B_TRUE, cr);
                                unlock_start = new_mark;
                        } else {
                                new_unlock = *lock;
                                new_unlock.l_start = unlock_start;
                                new_unlock.l_length = unlock_end - unlock_start;
                                (void) smb_fsop_frlock(node, &new_unlock,
                                    B_TRUE, cr);
                                break;
                        }
                } else if (new_mark) {
                        unlock_start = new_mark;
                } else {
                        break;
                }
        }
}

/*
 * smb_lock_range_overlap
 *
 * Checks if the lock range (start, length) overlaps the range in the lock
 * structure.
 *
 * Zero-length byte range locks actually affect no single byte of the stream,
 * meaning the stream can still be accessed even with such locks in place.
 * However, they do conflict with other ranges in the following manner:
 * a conflict exists only if the positive-length range contains the
 * zero-length range's offset but doesn't start at it.
 *
 * return values:
 *	0 - Lock range doesn't overlap
 *	1 - Lock range overlaps.
 */
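/*
 * Illustrative example (added; values are hypothetical): an existing
 * lock with l_start == 90 and l_length == 20 covers bytes 90-109.  A
 * zero-length range at start == 100 overlaps it, because 90 < 100 and
 * 90 + 20 > 100, while a zero-length range at start == 90 does not,
 * since the positive-length lock starts exactly at that offset.
 */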

#define	RANGE_NO_OVERLAP	0
#define	RANGE_OVERLAP		1

static int
smb_lock_range_overlap(struct smb_lock *lock, uint64_t start, uint64_t length)
{
        if (length == 0) {
                if ((lock->l_start < start) &&
                    ((lock->l_start + lock->l_length) > start))
                        return (RANGE_OVERLAP);

                return (RANGE_NO_OVERLAP);
        }

        /* The following test is intended to catch roll over locks. */
        if ((start == lock->l_start) && (length == lock->l_length))
                return (RANGE_OVERLAP);

        if (start < lock->l_start) {
                if (start + length > lock->l_start)
                        return (RANGE_OVERLAP);
        } else if (start < lock->l_start + lock->l_length)
                return (RANGE_OVERLAP);

        return (RANGE_NO_OVERLAP);
}

/*
 * smb_lock_range_lckrules
 *
 * Lock range rules:
 *	1. Overlapping read locks are allowed if the
 *	   current locks in the region are only read locks,
 *	   irrespective of the pid of the SMB client issuing
 *	   the lock request.
 *
 *	2. A read lock in the overlapped region of a write lock
 *	   is allowed if the previous lock was taken by the
 *	   same pid and connection.
 *
 * return status:
 *	NT_STATUS_SUCCESS - Input lock range conforms to the lock rules.
 *	NT_STATUS_LOCK_NOT_GRANTED - Input lock conflicts with the lock rules.
 *	NT_STATUS_CANCELLED - Error in processing lock rules
 */
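/*
 * Illustrative example (added; not part of the original comment): two
 * SMB_LOCK_TYPE_READONLY locks over overlapping ranges are both granted,
 * even when requested by different clients (rule 1).  A read lock that
 * overlaps an existing SMB_LOCK_TYPE_READWRITE lock is granted only when
 * that existing lock was taken on the same ofile by the same session,
 * pid and uid (rule 2); any other overlap returns
 * NT_STATUS_LOCK_NOT_GRANTED.
 */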
static uint32_t
smb_lock_range_lckrules(
    smb_request_t *sr,
    smb_ofile_t *file,
    smb_node_t *node,
    smb_lock_t *dlock,
    smb_lock_t **clockp)
{
        smb_lock_t *lock;
        uint32_t status = NT_STATUS_SUCCESS;

        /* Check if file is closed */
        if (!smb_ofile_is_open(file)) {
                return (NT_STATUS_RANGE_NOT_LOCKED);
        }

        /* Caller must hold lock for node->n_lock_list */
        for (lock = smb_llist_head(&node->n_lock_list);
            lock != NULL;
            lock = smb_llist_next(&node->n_lock_list, lock)) {

                if (!smb_lock_range_overlap(lock, dlock->l_start,
                    dlock->l_length))
                        continue;

                /*
                 * Check whether the lock in the overlapping record
                 * is a read lock.  Read locks may overlap each other
                 * irrespective of pids.
                 */
                if ((lock->l_type == SMB_LOCK_TYPE_READONLY) &&
                    (dlock->l_type == SMB_LOCK_TYPE_READONLY)) {
                        continue;
                }

                /*
                 * When a read lock overlaps a write lock, check if
                 * it is allowed.
                 */
                if ((dlock->l_type == SMB_LOCK_TYPE_READONLY) &&
                    !(lock->l_type == SMB_LOCK_TYPE_READONLY)) {
                        if (lock->l_file == sr->fid_ofile &&
                            lock->l_session_kid == sr->session->s_kid &&
                            lock->l_pid == sr->smb_pid &&
                            lock->l_uid == sr->smb_uid) {
                                continue;
                        }
                }

                /* Conflict in overlapping lock element */
                *clockp = lock;
                status = NT_STATUS_LOCK_NOT_GRANTED;
                break;
        }

        return (status);
}

/*
 * smb_lock_wait
 *
 * Wait operation for smb overlapping lock to be released. Caller must hold
 * write lock for node->n_lock_list so that the set of active locks can't
 * change unexpectedly. The lock for node->n_lock_list will be released
 * within this function during the sleep after the lock dependency has
 * been recorded.
 *
 * return value
 *
 *	0	The request was canceled.
 *	-1	The timeout was reached.
 *	>0	Condition met.
 */
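/*
 * Note (added): as used by smb_lock_range() above, a return of 0 maps to
 * NT_STATUS_CANCELLED, while -1 (timeout expired) triggers one final pass
 * through the lock rules with the timeout forced to zero.
 */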
static clock_t
smb_lock_wait(smb_request_t *sr, smb_lock_t *b_lock, smb_lock_t *c_lock)
{
        clock_t rc = 0;

        ASSERT(sr->sr_awaiting == NULL);

        mutex_enter(&sr->sr_mutex);

        switch (sr->sr_state) {
        case SMB_REQ_STATE_ACTIVE:
                /*
                 * Wait up till the timeout time keeping track of actual
                 * time waited for possible retry failure.
                 */
                sr->sr_state = SMB_REQ_STATE_WAITING_LOCK;
                sr->sr_awaiting = c_lock;
                mutex_exit(&sr->sr_mutex);

                mutex_enter(&c_lock->l_mutex);
                /*
                 * The conflict list (l_conflict_list) for a lock contains
                 * all the locks that are blocked by and in conflict with
                 * that lock. Add the new lock to the conflict list for the
                 * active lock.
                 *
                 * l_conflict_list is currently a fancy way of representing
                 * the references/dependencies on a lock. It could be
                 * replaced with a reference count but this approach
                 * has the advantage that MDB can display the lock
                 * dependencies at any point in time. In the future
                 * we should be able to leverage the list to implement
                 * an asynchronous locking model.
                 *
                 * l_blocked_by is the reverse of the conflict list. It
                 * points to the lock that the new lock conflicts with.
                 * As currently implemented this value is purely for
                 * debug purposes -- there are windows of time when
                 * l_blocked_by may be non-NULL even though there is no
                 * conflict list.
                 */
                b_lock->l_blocked_by = c_lock;
                smb_slist_insert_tail(&c_lock->l_conflict_list, b_lock);
                smb_llist_exit(&c_lock->l_file->f_node->n_lock_list);

                if (SMB_LOCK_INDEFINITE_WAIT(b_lock)) {
                        cv_wait(&c_lock->l_cv, &c_lock->l_mutex);
                } else {
                        rc = cv_timedwait(&c_lock->l_cv,
                            &c_lock->l_mutex, b_lock->l_end_time);
                }

                mutex_exit(&c_lock->l_mutex);

                smb_llist_enter(&c_lock->l_file->f_node->n_lock_list,
                    RW_WRITER);
                smb_slist_remove(&c_lock->l_conflict_list, b_lock);

                mutex_enter(&sr->sr_mutex);
                sr->sr_awaiting = NULL;
                if (sr->sr_state == SMB_REQ_STATE_CANCELED) {
                        rc = 0;
                } else {
                        sr->sr_state = SMB_REQ_STATE_ACTIVE;
                }
                break;

        default:
                ASSERT(sr->sr_state == SMB_REQ_STATE_CANCELED);
                rc = 0;
                break;
        }
        mutex_exit(&sr->sr_mutex);

        return (rc);
}

/*
 * smb_lock_range_ulckrules
 *
 *	1. The unlock should be performed at exactly matching ends.
 *	   This has been changed because overlapping ends are
 *	   allowed and there is no other precise way of locating
 *	   the lock entity in the node lock list.
 *
 *	2. The unlock fails if no corresponding lock exists.
 *
 * Return values
 *
 *	NT_STATUS_SUCCESS		Unlock request matches the lock record
 *					pointed to by the 'nodelock' lock
 *					structure.
 *
 *	NT_STATUS_RANGE_NOT_LOCKED	Unlock request doesn't match any
 *					lock record in the node lock list, or
 *					an error occurred while processing
 *					the unlock range.
 */
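/*
 * Illustrative example (added; values are hypothetical): if this ofile
 * previously locked the range (start == 100, length == 50), an unlock of
 * (100, 50) issued on the same ofile, session, pid and uid matches that
 * record and succeeds, while an unlock of (100, 25) matches nothing and
 * returns NT_STATUS_RANGE_NOT_LOCKED.
 */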
static uint32_t
smb_lock_range_ulckrules(
    smb_request_t *sr,
    smb_node_t *node,
    uint64_t start,
    uint64_t length,
    smb_lock_t **nodelock)
{
        smb_lock_t *lock;
        uint32_t status = NT_STATUS_RANGE_NOT_LOCKED;

        /* Caller must hold lock for node->n_lock_list */
        for (lock = smb_llist_head(&node->n_lock_list);
            lock != NULL;
            lock = smb_llist_next(&node->n_lock_list, lock)) {

                if ((start == lock->l_start) &&
                    (length == lock->l_length) &&
                    lock->l_file == sr->fid_ofile &&
                    lock->l_session_kid == sr->session->s_kid &&
                    lock->l_pid == sr->smb_pid &&
                    lock->l_uid == sr->smb_uid) {
                        *nodelock = lock;
                        status = NT_STATUS_SUCCESS;
                        break;
                }
        }

        return (status);
}

static smb_lock_t *
smb_lock_create(
    smb_request_t *sr,
    uint64_t start,
    uint64_t length,
    uint32_t locktype,
    uint32_t timeout)
{
        smb_lock_t *lock;

        ASSERT(locktype == SMB_LOCK_TYPE_READWRITE ||
            locktype == SMB_LOCK_TYPE_READONLY);

        lock = kmem_zalloc(sizeof (smb_lock_t), KM_SLEEP);
        lock->l_magic = SMB_LOCK_MAGIC;
        lock->l_sr = sr; /* Invalid after lock is active */
        lock->l_session_kid = sr->session->s_kid;
        lock->l_session = sr->session;
        lock->l_file = sr->fid_ofile;
        lock->l_uid = sr->smb_uid;
        lock->l_pid = sr->smb_pid;
        lock->l_type = locktype;
        lock->l_start = start;
        lock->l_length = length;
        /*
         * Calculate the absolute end time so that we can use it
         * in cv_timedwait.
         */
        lock->l_end_time = ddi_get_lbolt() + MSEC_TO_TICK(timeout);
        if (timeout == UINT_MAX)
                lock->l_flags |= SMB_LOCK_FLAG_INDEFINITE;

        mutex_init(&lock->l_mutex, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&lock->l_cv, NULL, CV_DEFAULT, NULL);
        smb_slist_constructor(&lock->l_conflict_list, sizeof (smb_lock_t),
            offsetof(smb_lock_t, l_conflict_lnd));

        return (lock);
}

static void
smb_lock_free(smb_lock_t *lock)
{
        smb_slist_destructor(&lock->l_conflict_list);
        cv_destroy(&lock->l_cv);
        mutex_destroy(&lock->l_mutex);

        kmem_free(lock, sizeof (smb_lock_t));
}

/*
 * smb_lock_destroy
 *
 * Caller must hold node->n_lock_list
 */
static void
smb_lock_destroy(smb_lock_t *lock)
{
        /*
         * Caller must hold node->n_lock_list lock.
         */
        mutex_enter(&lock->l_mutex);
        cv_broadcast(&lock->l_cv);
        mutex_exit(&lock->l_mutex);

        /*
         * The cv_broadcast above should wake up any locks that previously
         * had conflicts with this lock. Wait for the locking threads
         * to remove their references to this lock.
         */
        smb_slist_wait_for_empty(&lock->l_conflict_list);

        smb_lock_free(lock);
}

/*
 * smb_is_range_unlocked
 *
 * Checks if the current unlock byte range request overlaps another lock.
 * This function is used to determine where POSIX unlocks should be
 * applied.
 *
 * The return code and the value of new_mark must be interpreted as
 * follows:
 *
 * B_TRUE and (new_mark == 0):
 *	This is the last or only lock left to be unlocked
 *
 * B_TRUE and (new_mark > 0):
 *	The range from start to new_mark can be unlocked
 *
 * B_FALSE and (new_mark == 0):
 *	The unlock can't be performed and we are done
 *
 * B_FALSE and (new_mark > 0):
 *	The range from start to new_mark can't be unlocked;
 *	start should be reset to new_mark for the next pass
 */
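/*
 * Worked example (added; values are hypothetical): unlocking [0, 100)
 * while a lock on the same ofile still covers bytes 20-39 takes three
 * passes: B_TRUE with new_mark == 20 (bytes 0-19 may be unlocked), then
 * B_FALSE with new_mark == 40 (skip the locked piece and restart there),
 * and finally B_TRUE with new_mark == 0 (the remainder may be unlocked).
 */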

static boolean_t
smb_is_range_unlocked(uint64_t start, uint64_t end, uint32_t uniqid,
    smb_llist_t *llist_head, uint64_t *new_mark)
{
        struct smb_lock *lk = NULL;
        uint64_t low_water_mark = MAXOFFSET_T;
        uint64_t lk_start;
        uint64_t lk_end;

        *new_mark = 0;
        lk = smb_llist_head(llist_head);
        while (lk) {
                if (lk->l_length == 0) {
                        lk = smb_llist_next(llist_head, lk);
                        continue;
                }

                if (lk->l_file->f_uniqid != uniqid) {
                        lk = smb_llist_next(llist_head, lk);
                        continue;
                }

                lk_end = lk->l_start + lk->l_length - 1;
                lk_start = lk->l_start;

                /*
                 * There is no overlap for the first two cases;
                 * check the next node.
                 */
                if (lk_end < start) {
                        lk = smb_llist_next(llist_head, lk);
                        continue;
                }
                if (lk_start > end) {
                        lk = smb_llist_next(llist_head, lk);
                        continue;
                }

                /* this range is completely locked */
                if ((lk_start <= start) && (lk_end >= end)) {
                        return (B_FALSE);
                }

                /* the first part of this range is locked */
                if ((start >= lk_start) && (start <= lk_end)) {
                        if (end > lk_end)
                                *new_mark = lk_end + 1;
                        return (B_FALSE);
                }

                /* this piece is unlocked */
                if ((lk_start >= start) && (lk_start <= end)) {
                        if (low_water_mark > lk_start)
                                low_water_mark = lk_start;
                }

                lk = smb_llist_next(llist_head, lk);
        }

        if (low_water_mark != MAXOFFSET_T) {
                *new_mark = low_water_mark;
                return (B_TRUE);
        }
        /* the range is completely unlocked */
        return (B_TRUE);
}