/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
 */

/*
 * This module provides range lock functionality for CIFS/SMB clients.
 * The lock range service functions process SMB lock and unlock
 * requests for a file by applying the lock rules, marking the file
 * range as locked if the lock succeeds and returning an appropriate
 * error code otherwise.
 */

#include <smbsrv/smb_kproto.h>
#include <smbsrv/smb_fsops.h>
#include <sys/nbmlock.h>
#include <sys/param.h>

extern caller_context_t smb_ct;

static void smb_lock_posix_unlock(smb_node_t *, smb_lock_t *, cred_t *);
static boolean_t smb_is_range_unlocked(uint64_t, uint64_t, uint32_t,
    smb_llist_t *, uint64_t *);
static int smb_lock_range_overlap(smb_lock_t *, uint64_t, uint64_t);
static uint32_t smb_lock_range_lckrules(smb_request_t *, smb_ofile_t *,
    smb_node_t *, smb_lock_t *, smb_lock_t **);
static clock_t smb_lock_wait(smb_request_t *, smb_lock_t *, smb_lock_t *);
static uint32_t smb_lock_range_ulckrules(smb_request_t *, smb_node_t *,
    uint64_t, uint64_t, smb_lock_t **nodelock);
static smb_lock_t *smb_lock_create(smb_request_t *, uint64_t, uint64_t,
    uint32_t, uint32_t);
static void smb_lock_destroy(smb_lock_t *);
static void smb_lock_free(smb_lock_t *);

/*
 * Return the number of range locks on the specified ofile.
 */
uint32_t
smb_lock_get_lock_count(smb_node_t *node, smb_ofile_t *of)
{
	smb_lock_t	*lock;
	smb_llist_t	*llist;
	uint32_t	count = 0;

	SMB_NODE_VALID(node);
	SMB_OFILE_VALID(of);

	llist = &node->n_lock_list;

	smb_llist_enter(llist, RW_READER);
	for (lock = smb_llist_head(llist);
	    lock != NULL;
	    lock = smb_llist_next(llist, lock)) {
		if (lock->l_file == of)
			++count;
	}
	smb_llist_exit(llist);

	return (count);
}

/*
 * smb_unlock_range
 *
 * Locates the lock range corresponding to the unlock request and
 * removes it.
 *
 * NT_STATUS_SUCCESS - Unlock range performed successfully.
 * !NT_STATUS_SUCCESS - Error in unlock range operation.
 */
uint32_t
smb_unlock_range(
    smb_request_t	*sr,
    smb_node_t		*node,
    uint64_t		start,
    uint64_t		length)
{
	smb_lock_t	*lock = NULL;
	uint32_t	status;

	/* Apply unlocking rules */
	smb_llist_enter(&node->n_lock_list, RW_WRITER);
	status = smb_lock_range_ulckrules(sr, node, start, length, &lock);
	if (status != NT_STATUS_SUCCESS) {
		/*
		 * If the lock range doesn't match any entry
		 * in the list, return an error.
		 */
		ASSERT(lock == NULL);
		smb_llist_exit(&node->n_lock_list);
		return (status);
	}

	smb_llist_remove(&node->n_lock_list, lock);
	smb_lock_posix_unlock(node, lock, sr->user_cr);
	smb_llist_exit(&node->n_lock_list);
	smb_lock_destroy(lock);

	return (status);
}

/*
 * smb_lock_range
 *
 * Checks the validity of the lock request for the given range of file
 * data by applying the lock rules against every element of the node
 * lock list.
 *
 * Break shared (levelII) oplocks. If there is an exclusive oplock, it is
 * owned by this ofile and therefore should not be broken.
 *
 * The function returns with the new lock added if the lock request does
 * not conflict with an existing range lock on the file. Otherwise the
 * request waits for the conflicting lock (subject to its timeout) and the
 * rules are applied again.
 *
 * NT_STATUS_SUCCESS - Lock range performed successfully.
 * !NT_STATUS_SUCCESS - Error in lock range operation.
 */
uint32_t
smb_lock_range(
    smb_request_t	*sr,
    uint64_t		start,
    uint64_t		length,
    uint32_t		timeout,
    uint32_t		locktype)
{
	smb_ofile_t	*file = sr->fid_ofile;
	smb_node_t	*node = file->f_node;
	smb_lock_t	*lock;
	smb_lock_t	*clock = NULL;
	uint32_t	result = NT_STATUS_SUCCESS;
	boolean_t	lock_has_timeout =
	    (timeout != 0 && timeout != UINT_MAX);

	lock = smb_lock_create(sr, start, length, locktype, timeout);

	smb_llist_enter(&node->n_lock_list, RW_WRITER);
	for (;;) {
		clock_t	rc;

		/* Apply locking rules */
		result = smb_lock_range_lckrules(sr, file, node, lock, &clock);

		if ((result == NT_STATUS_CANCELLED) ||
		    (result == NT_STATUS_SUCCESS) ||
		    (result == NT_STATUS_RANGE_NOT_LOCKED)) {
			ASSERT(clock == NULL);
			break;
		} else if (timeout == 0) {
			break;
		}

		ASSERT(result == NT_STATUS_LOCK_NOT_GRANTED);
		ASSERT(clock);
		/*
		 * Call smb_lock_wait holding write lock for
		 * node lock list.  smb_lock_wait will release
		 * this lock if it blocks.
		 */
		ASSERT(node == clock->l_file->f_node);

		rc = smb_lock_wait(sr, lock, clock);
		if (rc == 0) {
			result = NT_STATUS_CANCELLED;
			break;
		}
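		/*
		 * rc == -1 means smb_lock_wait timed out; clear the
		 * timeout so the next pass through the lock rules
		 * either succeeds or breaks out with the conflict
		 * status.
		 */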
		if (rc == -1)
			timeout = 0;

		clock = NULL;
	}

	lock->l_blocked_by = NULL;

	if (result != NT_STATUS_SUCCESS) {
		/*
		 * Under certain conditions NT_STATUS_FILE_LOCK_CONFLICT
		 * should be returned instead of NT_STATUS_LOCK_NOT_GRANTED.
		 * All of this appears to be specific to SMB1.
		 */
		if (sr->session->dialect <= NT_LM_0_12 &&
		    result == NT_STATUS_LOCK_NOT_GRANTED) {
			/*
			 * Locks with timeouts always return
			 * NT_STATUS_FILE_LOCK_CONFLICT
			 */
			if (lock_has_timeout)
				result = NT_STATUS_FILE_LOCK_CONFLICT;

			/*
			 * Locks starting higher than 0xef000000 that do not
			 * have the MSB set always return
			 * NT_STATUS_FILE_LOCK_CONFLICT
			 */
			if ((lock->l_start >= 0xef000000) &&
			    !(lock->l_start & (1ULL << 63))) {
				result = NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/*
			 * If the last lock attempt that failed on this file
			 * handle started at the same offset as this one,
			 * return NT_STATUS_FILE_LOCK_CONFLICT.
			 */
			mutex_enter(&file->f_mutex);
			if ((file->f_flags & SMB_OFLAGS_LLF_POS_VALID) &&
			    (lock->l_start == file->f_llf_pos)) {
				result = NT_STATUS_FILE_LOCK_CONFLICT;
			}
			mutex_exit(&file->f_mutex);
		}

		/* Update last lock failed offset */
		mutex_enter(&file->f_mutex);
		file->f_llf_pos = lock->l_start;
		file->f_flags |= SMB_OFLAGS_LLF_POS_VALID;
		mutex_exit(&file->f_mutex);

		smb_lock_free(lock);
	} else {
		/*
		 * Don't insert into the CIFS lock list unless the
		 * POSIX lock worked.
		 */
		if (smb_fsop_frlock(node, lock, B_FALSE, sr->user_cr))
			result = NT_STATUS_FILE_LOCK_CONFLICT;
		else
			smb_llist_insert_tail(&node->n_lock_list, lock);
	}
	smb_llist_exit(&node->n_lock_list);

	if (result == NT_STATUS_SUCCESS)
		smb_oplock_break_levelII(node);

	return (result);
}


/*
 * smb_lock_range_access
 *
 * Scans the node lock list to check for any overlapping lock.
 * An overlapping lock is allowed only for the same session and
 * client pid.
 *
 * Return values
 *	NT_STATUS_SUCCESS		lock access granted.
 *	NT_STATUS_FILE_LOCK_CONFLICT	access denied due to lock conflict.
 */
int
smb_lock_range_access(
    smb_request_t	*sr,
    smb_node_t		*node,
    uint64_t		start,
    uint64_t		length,	 /* zero means to EoF */
    boolean_t		will_write)
{
	smb_lock_t	*lock;
	smb_llist_t	*llist;
	int		status = NT_STATUS_SUCCESS;

	llist = &node->n_lock_list;
	smb_llist_enter(llist, RW_READER);
	/* Search for any applicable lock */
	for (lock = smb_llist_head(llist);
	    lock != NULL;
	    lock = smb_llist_next(llist, lock)) {

		if (!smb_lock_range_overlap(lock, start, length))
			/* Lock does not overlap */
			continue;

		if (lock->l_type == SMB_LOCK_TYPE_READONLY && !will_write)
			continue;

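		/*
		 * An overlapping read/write lock owned by the same
		 * session and client pid does not block this access.
		 */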
		if (lock->l_type == SMB_LOCK_TYPE_READWRITE &&
		    lock->l_session_kid == sr->session->s_kid &&
		    lock->l_pid == sr->smb_pid)
			continue;

		status = NT_STATUS_FILE_LOCK_CONFLICT;
		break;
	}
	smb_llist_exit(llist);
	return (status);
}

void
smb_node_destroy_lock_by_ofile(smb_node_t *node, smb_ofile_t *file)
{
	smb_lock_t	*lock;
	smb_lock_t	*nxtl;
	list_t		destroy_list;

	SMB_NODE_VALID(node);
	ASSERT(node->n_refcnt);

	/*
	 * Move locks matching the specified file from the node->n_lock_list
	 * to a temporary list (holding the lock the entire time) then
	 * destroy all the matching locks.  We can't call smb_lock_destroy
	 * while we are holding the lock for node->n_lock_list because we will
	 * deadlock and we can't drop the lock because the list contents might
	 * change (for example nxtl might get removed on another thread).
	 */
	list_create(&destroy_list, sizeof (smb_lock_t),
	    offsetof(smb_lock_t, l_lnd));

	smb_llist_enter(&node->n_lock_list, RW_WRITER);
	lock = smb_llist_head(&node->n_lock_list);
	while (lock) {
		nxtl = smb_llist_next(&node->n_lock_list, lock);
		if (lock->l_file == file) {
			smb_llist_remove(&node->n_lock_list, lock);
			smb_lock_posix_unlock(node, lock, file->f_user->u_cred);
			list_insert_tail(&destroy_list, lock);
		}
		lock = nxtl;
	}
	smb_llist_exit(&node->n_lock_list);

	lock = list_head(&destroy_list);
	while (lock) {
		nxtl = list_next(&destroy_list, lock);
		list_remove(&destroy_list, lock);
		smb_lock_destroy(lock);
		lock = nxtl;
	}

	list_destroy(&destroy_list);
}

void
smb_lock_range_error(smb_request_t *sr, uint32_t status32)
{
	uint16_t errcode;

	if (status32 == NT_STATUS_CANCELLED)
		errcode = ERROR_OPERATION_ABORTED;
	else
		errcode = ERRlock;

	smbsr_error(sr, status32, ERRDOS, errcode);
}

/*
 * An SMB variant of nbl_conflict().
 *
 * SMB prevents remove or rename when conflicting locks exist
 * (unlike NFS, which is why we can't just use nbl_conflict).
 *
 * Returns:
 *	NT_STATUS_SHARING_VIOLATION - nbl_share_conflict
 *	NT_STATUS_FILE_LOCK_CONFLICT - nbl_lock_conflict
 *	NT_STATUS_SUCCESS - operation can proceed
 *
 * NB: This function used to also check the list of ofiles via
 * smb_lock_range_access(), but we _can't_ do that here
 * due to lock order constraints between node->n_lock_list
 * and node->vp->vnbllock (taken via nbl_start_crit).
 * They must be taken in that order, and in here, we
 * already hold vp->vnbllock.
 */
DWORD
smb_nbl_conflict(smb_node_t *node, uint64_t off, uint64_t len, nbl_op_t op)
{
	int svmand;

	SMB_NODE_VALID(node);
	ASSERT(smb_node_in_crit(node));
	ASSERT(op == NBL_READ || op == NBL_WRITE || op == NBL_READWRITE ||
	    op == NBL_REMOVE || op == NBL_RENAME);

	if (smb_node_is_dir(node))
		return (NT_STATUS_SUCCESS);

	if (nbl_share_conflict(node->vp, op, &smb_ct))
		return (NT_STATUS_SHARING_VIOLATION);

	/*
	 * When checking for lock conflicts, rename and remove
	 * are not allowed, so treat those as read/write.
	 */
	if (op == NBL_RENAME || op == NBL_REMOVE)
		op = NBL_READWRITE;

	if (nbl_svmand(node->vp, zone_kcred(), &svmand))
		svmand = 1;

	if (nbl_lock_conflict(node->vp, op, off, len, svmand, &smb_ct))
		return (NT_STATUS_FILE_LOCK_CONFLICT);

	return (NT_STATUS_SUCCESS);
}

/*
 * smb_lock_posix_unlock
 *
 * Checks whether the current unlock request overlaps another lock and
 * repeatedly calls smb_is_range_unlocked on a sliding basis to unlock
 * all parts of the range that are not covered by other locks.
 */
static void
smb_lock_posix_unlock(smb_node_t *node, smb_lock_t *lock, cred_t *cr)
{
	uint64_t	new_mark;
	uint64_t	unlock_start;
	uint64_t	unlock_end;
	smb_lock_t	new_unlock;
	smb_llist_t	*llist;
	boolean_t	can_unlock;

	new_mark = 0;
	unlock_start = lock->l_start;
	unlock_end = unlock_start + lock->l_length;
	llist = &node->n_lock_list;

	for (;;) {
		can_unlock = smb_is_range_unlocked(unlock_start, unlock_end,
		    lock->l_file->f_uniqid, llist, &new_mark);
		if (can_unlock) {
			if (new_mark) {
				new_unlock = *lock;
				new_unlock.l_start = unlock_start;
				new_unlock.l_length = new_mark - unlock_start;
				(void) smb_fsop_frlock(node, &new_unlock,
				    B_TRUE, cr);
				unlock_start = new_mark;
			} else {
				new_unlock = *lock;
				new_unlock.l_start = unlock_start;
				new_unlock.l_length = unlock_end - unlock_start;
				(void) smb_fsop_frlock(node, &new_unlock,
				    B_TRUE, cr);
				break;
			}
		} else if (new_mark) {
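			/*
			 * The range up to new_mark is still covered by
			 * another lock on this ofile; skip past it and
			 * check the remainder on the next pass.
			 */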
			unlock_start = new_mark;
		} else {
			break;
		}
	}
}

/*
 * smb_lock_range_overlap
 *
 * Checks if the lock range (start, length) overlaps the range in the
 * lock structure.
 *
 * Zero-length byte range locks actually affect no single byte of the stream,
 * meaning they can still be accessed even with such locks in place. However,
 * they do conflict with other ranges in the following manner:
 *  a conflict only exists if the positive-length range contains the
 *  zero-length range's offset but doesn't start at it
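 *  (for example, a zero-length lock at offset 10 conflicts with a lock
 *  covering bytes 5 through 14, but not with one starting at offset 10)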
 *
 * return values:
 *	0 - Lock range doesn't overlap
 *	1 - Lock range overlaps.
 */

#define	RANGE_NO_OVERLAP	0
#define	RANGE_OVERLAP		1

static int
smb_lock_range_overlap(struct smb_lock *lock, uint64_t start, uint64_t length)
{
	if (length == 0) {
		if ((lock->l_start < start) &&
		    ((lock->l_start + lock->l_length) > start))
			return (RANGE_OVERLAP);

		return (RANGE_NO_OVERLAP);
	}

	/* The following test is intended to catch roll over locks. */
	if ((start == lock->l_start) && (length == lock->l_length))
		return (RANGE_OVERLAP);

	if (start < lock->l_start) {
		if (start + length > lock->l_start)
			return (RANGE_OVERLAP);
	} else if (start < lock->l_start + lock->l_length)
		return (RANGE_OVERLAP);

	return (RANGE_NO_OVERLAP);
}

/*
 * smb_lock_range_lckrules
 *
 * Lock range rules:
 *	1. Overlapping read locks are allowed if the
 *	   current locks in the region are only read locks,
 *	   irrespective of the pid of the SMB client issuing
 *	   the lock request.
 *
 *	2. A read lock in the overlapped region of a write lock
 *	   is allowed if the previous lock was taken by the
 *	   same pid and connection.
 *
 * return status:
 *	NT_STATUS_SUCCESS - Input lock range conforms to the lock rules.
 *	NT_STATUS_LOCK_NOT_GRANTED - Input lock conflicts with the lock rules.
 *	NT_STATUS_CANCELLED - Error in processing lock rules
 */
static uint32_t
smb_lock_range_lckrules(
    smb_request_t	*sr,
    smb_ofile_t		*file,
    smb_node_t		*node,
    smb_lock_t		*dlock,
    smb_lock_t		**clockp)
{
	smb_lock_t	*lock;
	uint32_t	status = NT_STATUS_SUCCESS;

	/* Check if file is closed */
	if (!smb_ofile_is_open(file)) {
		return (NT_STATUS_RANGE_NOT_LOCKED);
	}

	/* Caller must hold lock for node->n_lock_list */
	for (lock = smb_llist_head(&node->n_lock_list);
	    lock != NULL;
	    lock = smb_llist_next(&node->n_lock_list, lock)) {

		if (!smb_lock_range_overlap(lock, dlock->l_start,
		    dlock->l_length))
			continue;

		/*
		 * Check whether the lock in the overlapping record
		 * is a read lock.  Read locks may overlap
		 * irrespective of pids.
		 */
		if ((lock->l_type == SMB_LOCK_TYPE_READONLY) &&
		    (dlock->l_type == SMB_LOCK_TYPE_READONLY)) {
			continue;
		}

		/*
		 * When a read lock overlaps a write lock, check if
		 * it is allowed.
		 */
		if ((dlock->l_type == SMB_LOCK_TYPE_READONLY) &&
		    !(lock->l_type == SMB_LOCK_TYPE_READONLY)) {
			if (lock->l_file == sr->fid_ofile &&
			    lock->l_session_kid == sr->session->s_kid &&
			    lock->l_pid == sr->smb_pid &&
			    lock->l_uid == sr->smb_uid) {
				continue;
			}
		}

		/* Conflict in overlapping lock element */
		*clockp = lock;
		status = NT_STATUS_LOCK_NOT_GRANTED;
		break;
	}

	return (status);
}

/*
 * smb_lock_wait
 *
 * Wait for an overlapping SMB lock to be released.  The caller must hold
 * the write lock for node->n_lock_list so that the set of active locks can't
 * change unexpectedly.  The lock for node->n_lock_list will be released
 * within this function during the sleep, after the lock dependency has
 * been recorded.
 *
 * return value
 *
 *	0	The request was canceled.
 *	-1	The timeout was reached.
 *	>0	Condition met.
 */
static clock_t
smb_lock_wait(smb_request_t *sr, smb_lock_t *b_lock, smb_lock_t *c_lock)
{
	clock_t		rc = 0;

	ASSERT(sr->sr_awaiting == NULL);

	mutex_enter(&sr->sr_mutex);

	switch (sr->sr_state) {
	case SMB_REQ_STATE_ACTIVE:
		/*
		 * Wait up to the timeout time, keeping track of the
		 * actual time waited in case of a retry failure.
		 */
		sr->sr_state = SMB_REQ_STATE_WAITING_LOCK;
		sr->sr_awaiting = c_lock;
		mutex_exit(&sr->sr_mutex);

		mutex_enter(&c_lock->l_mutex);
		/*
		 * The conflict list (l_conflict_list) for a lock contains
		 * all the locks that are blocked by and in conflict with
		 * that lock.  Add the new lock to the conflict list for the
		 * active lock.
		 *
		 * l_conflict_list is currently a fancy way of representing
		 * the references/dependencies on a lock.  It could be
		 * replaced with a reference count but this approach
		 * has the advantage that MDB can display the lock
		 * dependencies at any point in time.  In the future
		 * we should be able to leverage the list to implement
		 * an asynchronous locking model.
		 *
		 * l_blocked_by is the reverse of the conflict list.  It
		 * points to the lock that the new lock conflicts with.
		 * As currently implemented this value is purely for
		 * debug purposes -- there are windows of time when
		 * l_blocked_by may be non-NULL even though there is no
		 * conflict list.
		 */
		b_lock->l_blocked_by = c_lock;
		smb_slist_insert_tail(&c_lock->l_conflict_list, b_lock);
		smb_llist_exit(&c_lock->l_file->f_node->n_lock_list);

		if (SMB_LOCK_INDEFINITE_WAIT(b_lock)) {
			cv_wait(&c_lock->l_cv, &c_lock->l_mutex);
		} else {
			rc = cv_timedwait(&c_lock->l_cv,
			    &c_lock->l_mutex, b_lock->l_end_time);
		}

		mutex_exit(&c_lock->l_mutex);

		smb_llist_enter(&c_lock->l_file->f_node->n_lock_list,
		    RW_WRITER);
		smb_slist_remove(&c_lock->l_conflict_list, b_lock);

		mutex_enter(&sr->sr_mutex);
		sr->sr_awaiting = NULL;
		if (sr->sr_state == SMB_REQ_STATE_CANCELED) {
			rc = 0;
		} else {
			sr->sr_state = SMB_REQ_STATE_ACTIVE;
		}
		break;

	default:
		ASSERT(sr->sr_state == SMB_REQ_STATE_CANCELED);
		rc = 0;
		break;
	}
	mutex_exit(&sr->sr_mutex);

	return (rc);
}

/*
 * smb_lock_range_ulckrules
 *
 *	1. Unlock must be performed on exactly matching ends.
 *	   Overlapping ranges are allowed for locks, so an exact
 *	   match is the only precise way of locating the lock
 *	   entity in the node lock list.
 *
 *	2. The unlock fails if no corresponding lock exists.
 *
 * Return values
 *
 *	NT_STATUS_SUCCESS		Unlock request matches the lock record
 *					pointed to by the 'nodelock' lock
 *					structure.
 *
 *	NT_STATUS_RANGE_NOT_LOCKED	Unlock request doesn't match any
 *					lock record in the node lock list, or
 *					error in unlock range processing.
 */
static uint32_t
smb_lock_range_ulckrules(
    smb_request_t	*sr,
    smb_node_t		*node,
    uint64_t		start,
    uint64_t		length,
    smb_lock_t		**nodelock)
{
	smb_lock_t	*lock;
	uint32_t	status = NT_STATUS_RANGE_NOT_LOCKED;

	/* Caller must hold lock for node->n_lock_list */
	for (lock = smb_llist_head(&node->n_lock_list);
	    lock != NULL;
	    lock = smb_llist_next(&node->n_lock_list, lock)) {

		if ((start == lock->l_start) &&
		    (length == lock->l_length) &&
		    lock->l_file == sr->fid_ofile &&
		    lock->l_session_kid == sr->session->s_kid &&
		    lock->l_pid == sr->smb_pid &&
		    lock->l_uid == sr->smb_uid) {
			*nodelock = lock;
			status = NT_STATUS_SUCCESS;
			break;
		}
	}

	return (status);
}

static smb_lock_t *
smb_lock_create(
    smb_request_t *sr,
    uint64_t start,
    uint64_t length,
    uint32_t locktype,
    uint32_t timeout)
{
	smb_lock_t *lock;

	ASSERT(locktype == SMB_LOCK_TYPE_READWRITE ||
	    locktype == SMB_LOCK_TYPE_READONLY);

	lock = kmem_zalloc(sizeof (smb_lock_t), KM_SLEEP);
	lock->l_magic = SMB_LOCK_MAGIC;
	lock->l_sr = sr; /* Invalid after lock is active */
	lock->l_session_kid = sr->session->s_kid;
	lock->l_session = sr->session;
	lock->l_file = sr->fid_ofile;
	lock->l_uid = sr->smb_uid;
	lock->l_pid = sr->smb_pid;
	lock->l_type = locktype;
	lock->l_start = start;
	lock->l_length = length;
	/*
	 * Calculate the absolute end time so that we can use it
	 * in cv_timedwait.
	 */
	lock->l_end_time = ddi_get_lbolt() + MSEC_TO_TICK(timeout);
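	/*
	 * l_end_time is only consulted for timed waits (see smb_lock_wait);
	 * an indefinite wait is indicated by the flag set below.
	 */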
	if (timeout == UINT_MAX)
		lock->l_flags |= SMB_LOCK_FLAG_INDEFINITE;

	mutex_init(&lock->l_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&lock->l_cv, NULL, CV_DEFAULT, NULL);
	smb_slist_constructor(&lock->l_conflict_list, sizeof (smb_lock_t),
	    offsetof(smb_lock_t, l_conflict_lnd));

	return (lock);
}

static void
smb_lock_free(smb_lock_t *lock)
{
	smb_slist_destructor(&lock->l_conflict_list);
	cv_destroy(&lock->l_cv);
	mutex_destroy(&lock->l_mutex);

	kmem_free(lock, sizeof (smb_lock_t));
}

/*
 * smb_lock_destroy
 *
 * Caller must hold node->n_lock_list
 */
static void
smb_lock_destroy(smb_lock_t *lock)
{
	/*
	 * Caller must hold node->n_lock_list lock.
	 */
	mutex_enter(&lock->l_mutex);
	cv_broadcast(&lock->l_cv);
	mutex_exit(&lock->l_mutex);

	/*
	 * The cv_broadcast above should wake up any locks that previously
	 * had conflicts with this lock.  Wait for the locking threads
	 * to remove their references to this lock.
	 */
	smb_slist_wait_for_empty(&lock->l_conflict_list);

	smb_lock_free(lock);
}

/*
 * smb_is_range_unlocked
 *
 * Checks if the current unlock byte range request overlaps another lock.
 * This function is used to determine where POSIX unlocks should be
 * applied.
 *
 * The return code and the value of new_mark must be interpreted as
 * follows:
 *
 * B_TRUE and (new_mark == 0):
 *   This is the last or only lock left to be unlocked
 *
 * B_TRUE and (new_mark > 0):
 *   The range from start to new_mark can be unlocked
 *
 * B_FALSE and (new_mark == 0):
 *   The unlock can't be performed and we are done
 *
 * B_FALSE and (new_mark > 0):
 *   The range from start to new_mark can't be unlocked
 *   Start should be reset to new_mark for the next pass
 */

static boolean_t
smb_is_range_unlocked(uint64_t start, uint64_t end, uint32_t uniqid,
    smb_llist_t *llist_head, uint64_t *new_mark)
{
	struct smb_lock *lk = NULL;
	uint64_t low_water_mark = MAXOFFSET_T;
	uint64_t lk_start;
	uint64_t lk_end;

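	/*
	 * low_water_mark tracks the lowest start offset of any lock
	 * (owned by this ofile) that begins within [start, end]; if one
	 * is found, only the range below it is reported as unlocked.
	 */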
	*new_mark = 0;
	lk = smb_llist_head(llist_head);
	while (lk) {
		if (lk->l_length == 0) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}

		if (lk->l_file->f_uniqid != uniqid) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}

		lk_end = lk->l_start + lk->l_length - 1;
		lk_start = lk->l_start;

		/*
		 * There is no overlap for the first two cases;
		 * check the next node.
		 */
		if (lk_end < start) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}
		if (lk_start > end) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}

		/* this range is completely locked */
		if ((lk_start <= start) && (lk_end >= end)) {
			return (B_FALSE);
		}

		/* the first part of this range is locked */
		if ((start >= lk_start) && (start <= lk_end)) {
			if (end > lk_end)
				*new_mark = lk_end + 1;
			return (B_FALSE);
		}

		/* this piece is unlocked */
		if ((lk_start >= start) && (lk_start <= end)) {
			if (low_water_mark > lk_start)
				low_water_mark = lk_start;
		}

		lk = smb_llist_next(llist_head, lk);
	}

	if (low_water_mark != MAXOFFSET_T) {
		*new_mark = low_water_mark;
		return (B_TRUE);
	}
	/* the range is completely unlocked */
	return (B_TRUE);
}
890