/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2021 Tintri by DDN, Inc. All rights reserved.
 * Copyright 2021 RackTop Systems, Inc.
 */

/*
 * (SMB1/SMB2) Server-level Oplock support.
 *
 * Conceptually, this is a separate layer on top of the
 * file system (FS) layer oplock code in smb_cmn_oplock.c.
 * If these layers were more distinct, the FS layer would
 * need to use call-back functions (installed from here)
 * to "indicate an oplock break to the server" (see below).
 * As these layers are all in the same kernel module, the
 * delivery of these break indications just uses a direct
 * function call to smb_oplock_ind_break() below.
 *
 * This layer is responsible for handling the break indication,
 * which often requires scheduling a taskq job in the server,
 * and sending an oplock break message to the client using
 * the appropriate protocol for the open handle affected.
 *
 * The details of composing an oplock break message, the
 * protocol-specific details of requesting an oplock, and
 * returning that oplock to the client are in the files:
 *  smb_oplock.c, smb2_oplock.c, smb2_lease.c
 */

#include <smbsrv/smb2_kproto.h>
#include <smbsrv/smb_oplock.h>

/*
 * Verify relationship between BREAK_TO_... and CACHE bits,
 * used when setting the BREAK_TO_... below.
 */
#if BREAK_TO_READ_CACHING != (READ_CACHING << BREAK_SHIFT)
#error "BREAK_TO_READ_CACHING"
#endif
#if BREAK_TO_HANDLE_CACHING != (HANDLE_CACHING << BREAK_SHIFT)
#error "BREAK_TO_HANDLE_CACHING"
#endif
#if BREAK_TO_WRITE_CACHING != (WRITE_CACHING << BREAK_SHIFT)
#error "BREAK_TO_WRITE_CACHING"
#endif
#define	CACHE_RWH (READ_CACHING | WRITE_CACHING | HANDLE_CACHING)

/*
 * This is the timeout used in the thread that sends an
 * oplock break and waits for the client to respond
 * before it breaks the oplock locally.
 */
int smb_oplock_timeout_ack = 30000; /* mSec. */

/*
 * This is the timeout used in threads that have just
 * finished some sort of oplock request and now must
 * wait for (possibly multiple) breaks to complete.
 * This value must be at least a couple seconds LONGER
 * than the ack timeout above so that I/O callers won't
 * give up waiting before the local ack timeout.
 */
int smb_oplock_timeout_def = 45000; /* mSec. */

static void smb_oplock_async_break(void *);
static void smb_oplock_hdl_clear(smb_ofile_t *);
static void smb_oplock_wait_break_cancel(smb_request_t *sr);


/*
 * 2.1.5.17.3 Indicating an Oplock Break to the Server
 *
 * The inputs for indicating an oplock break to the server are:
 *
 *	BreakingOplockOpen: The Open used to request the oplock
 *	  that is now breaking.
 *	NewOplockLevel: The type of oplock the requested oplock
 *	  has been broken to.  Valid values are as follows:
 *		LEVEL_NONE (that is, no oplock)
 *		LEVEL_TWO
 *		A combination of one or more of the following flags:
 *			READ_CACHING
 *			HANDLE_CACHING
 *			WRITE_CACHING
 *	AcknowledgeRequired: A Boolean value; TRUE if the server
 *	  MUST acknowledge the oplock break, FALSE if not,
 *	  as specified in section 2.1.5.18.
 *	OplockCompletionStatus: The NTSTATUS code to return to the server.
 *
 * This algorithm simply represents the completion of an oplock request,
 * as specified in section 2.1.5.17.1 or section 2.1.5.17.2. The server
 * is expected to associate the return status from this algorithm with
 * BreakingOplockOpen, which is the Open passed in when it requested
 * the oplock that is now breaking.
 *
 * It is important to note that because several oplocks can be outstanding
 * in parallel, although this algorithm represents the completion of an
 * oplock request, it might not result in the completion of the algorithm
 * that called it. In particular, calling this algorithm will result in
 * completion of the caller only if BreakingOplockOpen is the same as the
 * Open with which the calling algorithm was itself called. To mitigate
 * confusion, each algorithm that refers to this section will specify
 * whether that algorithm's operation terminates at that point or not.
 *
 * The object store MUST return OplockCompletionStatus,
 * AcknowledgeRequired, and NewOplockLevel to the server (the algorithm is
 * as specified in section 2.1.5.17.1 and section 2.1.5.17.2).
 *
 * Implementation:
 *
 * We use two versions of this function:
 *	smb_oplock_ind_break_in_ack
 *	smb_oplock_ind_break
 *
 * The first is used when we're handling an Oplock Break Ack.
 * The second is used when other operations cause a break,
 * generally in one of the smb_oplock_break_... functions.
 *
 * Note that these are call-back functions that may be called with the
 * node ofile list rwlock held and the node oplock mutex entered, so
 * these should ONLY schedule oplock break work, and MUST NOT attempt
 * any actions that might require either of those locks.
 */

/*
 * smb_oplock_ind_break_in_ack
 *
 * Variant of smb_oplock_ind_break() for the oplock Ack handler.
 * When we need to indicate another oplock break from within the
 * Ack handler (during the Ack. of some previous oplock break)
 * we need to make sure this new break indication goes out only
 * AFTER the reply to the current break ack. is sent out.
 *
 * In this case, we always have an SR (the break ack) so we can
 * append the "ind break" work to the current SR and let the
 * request handler thread do this work after the reply is sent.
 * Note: this is always an SMB2 or later request, because this
 * only happens for "granular" oplocks, which are SMB2-only.
 *
 * This is mostly the same as smb_oplock_ind_break() except:
 * - The only CompletionStatus possible is STATUS_CANT_GRANT.
 * - Instead of taskq_dispatch this appends the new SR to
 *   the "post work" queue on the current SR.
 *
 * Note called with the node ofile list rwlock held and
 * the oplock mutex entered.
 */
void
smb_oplock_ind_break_in_ack(smb_request_t *ack_sr, smb_ofile_t *ofile,
    uint32_t NewLevel, boolean_t AckRequired)
{
	smb_request_t *new_sr;

	/*
	 * This should happen only with SMB2 or later,
	 * but in case that ever changes...
	 */
	if (ack_sr->session->dialect < SMB_VERS_2_BASE) {
		smb_oplock_ind_break(ofile, NewLevel,
		    AckRequired, STATUS_CANT_GRANT);
		return;
	}

	/*
	 * We're going to schedule a request that will have a
	 * reference to this ofile. Get the hold first.
	 */
	if (ofile->f_oplock_closing ||
	    !smb_ofile_hold_olbrk(ofile)) {
		/* It's closing (or whatever).  Nothing to do. */
		return;
	}

	/*
	 * When called from Ack processing, we want to use a
	 * request on the session doing the ack.  If we can't
	 * allocate a request on that session (because it's
	 * now disconnecting) just fall-back to the normal
	 * oplock break code path which deals with that.
	 * Once we have a request on the ack session, that
	 * session won't go away until the request is done.
	 */
	new_sr = smb_request_alloc(ack_sr->session, 0);
	if (new_sr == NULL) {
		smb_oplock_ind_break(ofile, NewLevel,
		    AckRequired, STATUS_CANT_GRANT);
		smb_ofile_release(ofile);
		return;
	}

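	/*
	 * Build the internal break request: run it async with
	 * kernel credentials, holding the ofile's tree and user.
	 */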
	new_sr->sr_state = SMB_REQ_STATE_SUBMITTED;
	new_sr->smb2_async = B_TRUE;
	new_sr->user_cr = zone_kcred();
	new_sr->fid_ofile = ofile;
	if (ofile->f_tree != NULL) {
		new_sr->tid_tree = ofile->f_tree;
		smb_tree_hold_internal(ofile->f_tree);
	}
	if (ofile->f_user != NULL) {
		new_sr->uid_user = ofile->f_user;
		smb_user_hold_internal(ofile->f_user);
	}
	new_sr->arg.olbrk.NewLevel = NewLevel;
	new_sr->arg.olbrk.AckRequired = AckRequired;

	/*
	 * Use smb2_cmd_code to indicate what to call.
	 * The work function will call smb_oplock_send_brk.
	 */
	new_sr->smb2_cmd_code = SMB2_OPLOCK_BREAK;
	smb2sr_append_postwork(ack_sr, new_sr);
}

/*
 * smb_oplock_ind_break
 *
 * This is the function described in [MS-FSA] 2.1.5.17.3
 * which is called many places in the oplock break code.
 *
 * Schedule a request & taskq job to do oplock break work
 * as requested by the FS-level code (smb_cmn_oplock.c).
 *
 * Note called with the node ofile list rwlock held and
 * the oplock mutex entered.
 */
void
smb_oplock_ind_break(smb_ofile_t *ofile, uint32_t NewLevel,
    boolean_t AckRequired, uint32_t CompletionStatus)
{
	smb_server_t *sv = ofile->f_server;
	smb_node_t *node = ofile->f_node;
	smb_request_t *sr = NULL;

	/*
	 * See notes at smb_oplock_async_break re. CompletionStatus.
	 * Check for any invalid codes here, so the assert happens in
	 * the thread passing an unexpected value.
	 * The real work happens in a taskq job.
	 */
	switch (CompletionStatus) {

	case NT_STATUS_SUCCESS:
	case STATUS_CANT_GRANT:
		/* Send break via taskq job. */
		break;

	case STATUS_NEW_HANDLE:
		/* nothing to do (keep for observability) */
		return;

	case NT_STATUS_OPLOCK_HANDLE_CLOSED:
		smb_oplock_hdl_clear(ofile);
		return;

	default:
		ASSERT(0);
		return;
	}

	/*
	 * We're going to schedule a request that will have a
	 * reference to this ofile. Get the hold first.
	 */
	if (ofile->f_oplock_closing ||
	    !smb_ofile_hold_olbrk(ofile)) {
		/* It's closing (or whatever).  Nothing to do. */
		return;
	}

	/*
	 * We need a request allocated on the session that owns
	 * this ofile in order to safely send on that session.
	 *
	 * Note that while we hold a ref. on the ofile, its
	 * f_session will not change.  An ofile in state
	 * _ORPHANED will have f_session == NULL, but the
	 * f_session won't _change_ while we have a ref,
	 * and won't be torn down under our feet.
	 * The same applies to f_tree and f_user.
	 *
	 * If f_session is NULL, or it's in a state that doesn't
	 * allow new requests, use the special "server" session.
	 */
	if (ofile->f_session != NULL)
		sr = smb_request_alloc(ofile->f_session, 0);
	if (sr == NULL)
		sr = smb_request_alloc(sv->sv_session, 0);
	if (sr == NULL) {
		/*
		 * Server must be shutting down.  We took a
		 * hold on the ofile that must be released,
		 * but we can't release here because we're
		 * called with the node ofile list entered.
		 * See smb_ofile_release_LL.
		 */
		smb_llist_post(&node->n_ofile_list, ofile,
		    smb_ofile_release_LL);
		return;
	}

	sr->sr_state = SMB_REQ_STATE_SUBMITTED;
	sr->smb2_async = B_TRUE;
	sr->user_cr = zone_kcred();
	sr->fid_ofile = ofile;
	if (ofile->f_tree != NULL) {
		sr->tid_tree = ofile->f_tree;
		smb_tree_hold_internal(sr->tid_tree);
	}
	if (ofile->f_user != NULL) {
		sr->uid_user = ofile->f_user;
		smb_user_hold_internal(sr->uid_user);
	}
	sr->arg.olbrk.NewLevel = NewLevel;
	sr->arg.olbrk.AckRequired = AckRequired;
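	/* The taskq job reads the CompletionStatus back from smb2_status. */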
	sr->smb2_status = CompletionStatus;

	(void) taskq_dispatch(
	    sv->sv_worker_pool,
	    smb_oplock_async_break, sr, TQ_SLEEP);
}

/*
 * smb_oplock_async_break
 *
 * Called via the taskq to handle an asynchronous oplock break.
 * We have a hold on the ofile, which will be released in
 * smb_request_free (via sr->fid_ofile).
 *
 * Note that sr->uid_user and sr->tid_tree are set from the
 * ofile when available, and may be NULL (e.g. for an
 * orphaned durable handle).
 *
 * Note that NewLevel as provided by the FS up-call does NOT
 * include the GRANULAR flag.  The SMB level is expected to
 * keep track of how each oplock was acquired (by lease or
 * traditional oplock request) and put the GRANULAR flag
 * back into the oplock state when calling down to the
 * FS-level code.  Also note that the lease break message
 * carries only the cache flags, not the GRANULAR flag.
 */
static void
smb_oplock_async_break(void *arg)
{
	smb_request_t	*sr = arg;
	uint32_t	CompletionStatus;

	SMB_REQ_VALID(sr);

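	/* smb2_status carried the CompletionStatus; recover and reset it. */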
	CompletionStatus = sr->smb2_status;
	sr->smb2_status = NT_STATUS_SUCCESS;

	mutex_enter(&sr->sr_mutex);
	sr->sr_worker = curthread;
	sr->sr_state = SMB_REQ_STATE_ACTIVE;
	mutex_exit(&sr->sr_mutex);

	/*
	 * Note that the CompletionStatus from the FS level
	 * (smb_cmn_oplock.c) encodes what kind of action we
	 * need to take at the SMB level.
	 */
	switch (CompletionStatus) {

	case STATUS_CANT_GRANT:
	case NT_STATUS_SUCCESS:
		smb_oplock_send_brk(sr);
		break;

	default:
		/* Checked by caller. */
		ASSERT(0);
		break;
	}

	if (sr->dh_nvl_dirty) {
		sr->dh_nvl_dirty = B_FALSE;
		smb2_dh_update_nvfile(sr);
	}

	sr->sr_state = SMB_REQ_STATE_COMPLETED;
	smb_request_free(sr);
}

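/*
 * Record the new oplock (or lease) state at the SMB level and,
 * if this is a persistent handle, save the updated state.
 */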
static void
smb_oplock_update(smb_request_t *sr, smb_ofile_t *ofile, uint32_t NewLevel)
{
	if (ofile->f_lease != NULL)
		ofile->f_lease->ls_state = NewLevel & CACHE_RWH;
	else
		ofile->f_oplock.og_state = NewLevel;

	if (ofile->dh_persist) {
		smb2_dh_update_oplock(sr, ofile);
	}
}

#ifdef DEBUG
int smb_oplock_debug_wait = 0;
#endif

/*
 * Send an oplock break over the wire, or if we can't,
 * then process the oplock break locally.
 *
 * Note that we have sr->fid_ofile here but all the other
 * normal sr members may be NULL:  uid_user, tid_tree.
 * Also sr->session may or may not be the same session as
 * the ofile came from (ofile->f_session) depending on
 * whether this is a "live" open or an orphaned DH,
 * where ofile->f_session will be NULL.
 *
 * Given that we don't always have a session, we determine
 * the oplock type (lease etc) from f_oplock.og_dialect.
 */
void
smb_oplock_send_brk(smb_request_t *sr)
{
	smb_ofile_t	*ofile;
	smb_lease_t	*lease;
	uint32_t	NewLevel;
	boolean_t	AckReq;
	uint32_t	status;
	int		rc;

	ofile = sr->fid_ofile;
	NewLevel = sr->arg.olbrk.NewLevel;
	AckReq = sr->arg.olbrk.AckRequired;
	lease = ofile->f_lease;

	/*
	 * Build the break message in sr->reply.
	 * It's free'd in smb_request_free().
	 * Also updates the lease and NewLevel.
	 */
	sr->reply.max_bytes = MLEN;
	if (lease != NULL) {
		/*
		 * The ofile has a lease, so this is SMB2 or later.
		 * Oplock state has changed, so update the epoch.
		 */
		mutex_enter(&lease->ls_mutex);
		lease->ls_epoch++;
		mutex_exit(&lease->ls_mutex);

		/* Note, needs "old" state in ls_state */
		smb2_lease_break_notification(sr,
		    (NewLevel & CACHE_RWH), AckReq);
		NewLevel |= OPLOCK_LEVEL_GRANULAR;
	} else if (ofile->f_oplock.og_dialect >= SMB_VERS_2_BASE) {
		/*
		 * SMB2 using old-style oplock (no lease)
		 */
		smb2_oplock_break_notification(sr, NewLevel);
	} else {
		/*
		 * SMB1 clients should only get Level II oplocks if they
		 * set the capability indicating they know about them.
		 */
		if (NewLevel == OPLOCK_LEVEL_TWO &&
		    ofile->f_oplock.og_dialect < NT_LM_0_12)
			NewLevel = OPLOCK_LEVEL_NONE;
		smb1_oplock_break_notification(sr, NewLevel);
	}

	/*
	 * Keep track of what we last sent to the client,
	 * preserving the GRANULAR flag (if a lease).
	 * If we're expecting an ACK, set og_breaking
	 * (or ls_breaking for a lease) so we can
	 * filter unsolicited ACKs.
	 */
	if (AckReq) {
		uint32_t BreakTo;

		if (lease != NULL) {
			BreakTo = (NewLevel & CACHE_RWH) << BREAK_SHIFT;
			if (BreakTo == 0)
				BreakTo = BREAK_TO_NO_CACHING;
			lease->ls_breaking = BreakTo;
		} else {
			if ((NewLevel & LEVEL_TWO_OPLOCK) != 0)
				BreakTo = BREAK_TO_TWO;
			else
				BreakTo = BREAK_TO_NONE;
			ofile->f_oplock.og_breaking = BreakTo;
		}
		/* Will update ls/og_state in ack. */
	} else {
		smb_oplock_update(sr, ofile, NewLevel);
	}

	/*
	 * Try to send the break message to the client.
	 * When we get to multi-channel, this is supposed to
	 * try to send on every channel before giving up.
	 */
	if (sr->session == ofile->f_session)
		rc = smb_session_send(sr->session, 0, &sr->reply);
	else
		rc = ENOTCONN;

	if (rc == 0) {
		/*
		 * OK, we were able to send the break message.
		 * If no ack. required, we're done.
		 */
		if (!AckReq)
			return;

		/*
		 * We're expecting an ACK.  Wait in this thread
		 * so we can log clients that don't respond.
		 *
		 * If debugging, we may want to break after a
		 * short wait to look into why we might be
		 * holding up progress (e.g. held locks).
		 */
#ifdef DEBUG
		if (smb_oplock_debug_wait > 0) {
			status = smb_oplock_wait_break(sr, ofile->f_node,
			    smb_oplock_debug_wait);
			if (status == 0)
				return;
			cmn_err(CE_NOTE, "clnt %s oplock break wait debug",
			    sr->session->ip_addr_str);
			debug_enter("oplock_wait");
		}
#endif
		status = smb_oplock_wait_break(sr, ofile->f_node,
		    smb_oplock_timeout_ack);
		if (status == 0)
			return;

		cmn_err(CE_NOTE, "clnt %s oplock break timeout",
		    sr->session->ip_addr_str);
		DTRACE_PROBE1(break_timeout, smb_ofile_t, ofile);

		/*
		 * Will do local ack below.  Note, after timeout,
		 * do a break to none or "no caching" regardless
		 * of what the passed-in cache level was.
		 * That means: clear all except GRANULAR.
		 */
		NewLevel &= OPLOCK_LEVEL_GRANULAR;
	} else {
		/*
		 * We were unable to send the oplock break request.
		 * Generally, that means we have no connection to this
		 * client right now, and this ofile will have state
		 * SMB_OFILE_STATE_ORPHANED.  We either close the handle
		 * or break the oplock locally, in which case the client
		 * gets the updated oplock state when it reconnects.
		 * Decide whether to keep or close.
		 *
		 * Relevant [MS-SMB2] sections:
		 *
		 * 3.3.4.6 Object Store Indicates an Oplock Break
		 * If Open.Connection is NULL, Open.IsResilient is FALSE,
		 * Open.IsDurable is FALSE and Open.IsPersistent is FALSE,
		 * the server SHOULD close the Open as specified in...
		 *
		 * 3.3.4.7 Object Store Indicates a Lease Break
		 * If Open.Connection is NULL, the server MUST close the
		 * Open as specified in ... for the following cases:
		 * - Open.IsResilient is FALSE, Open.IsDurable is FALSE,
		 *   and Open.IsPersistent is FALSE.
		 * - Lease.BreakToLeaseState does not contain
		 *   ...HANDLE_CACHING and Open.IsDurable is TRUE.
		 * If Lease.LeaseOpens is empty, (... local ack to "none").
		 */

		/*
		 * See similar logic in smb_dh_should_save
		 */
		switch (ofile->dh_vers) {
		case SMB2_RESILIENT:
			break;			/* keep DH */

		case SMB2_DURABLE_V2:
			if (ofile->dh_persist)
				break;		/* keep DH */
			/* FALLTHROUGH */
		case SMB2_DURABLE_V1:
			/* IS durable (v1 or v2) */
			if ((NewLevel & (OPLOCK_LEVEL_BATCH |
			    OPLOCK_LEVEL_CACHE_HANDLE)) != 0)
				break;		/* keep DH */
			/* FALLTHROUGH */
		case SMB2_NOT_DURABLE:
		default:
			smb_ofile_close(ofile, 0);
			return;
		}
		/* Keep this ofile (durable handle). */

		if (!AckReq) {
			/* Nothing more to do. */
			return;
		}
	}

	/*
	 * We get here after either an oplock break ack timeout,
	 * or a send failure for a durable handle type that we
	 * preserve rather than just close.  Do local ack.
	 */
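	/* Clear the SMB-level "breaking" state before the local ack. */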
	if (lease != NULL)
		lease->ls_breaking = 0;
	else
		ofile->f_oplock.og_breaking = 0;

	status = smb_oplock_ack_break(sr, ofile, &NewLevel);
	if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
		/* Not expecting this status return. */
		cmn_err(CE_NOTE, "clnt local oplock ack wait?");
		(void) smb_oplock_wait_break(sr, ofile->f_node,
		    smb_oplock_timeout_ack);
		status = 0;
	}
	if (status != 0) {
		cmn_err(CE_NOTE, "clnt local oplock ack, "
		    "status=0x%x", status);
	}

	/* Update ls/og_state as if we heard from the client. */
	smb_oplock_update(sr, ofile, NewLevel);
}

/*
 * See: NT_STATUS_OPLOCK_HANDLE_CLOSED above and
 * smb_ofile_close, smb_oplock_break_CLOSE.
 *
 * The FS-level oplock layer calls this to update the
 * SMB-level state when a handle loses its oplock.
 */
static void
smb_oplock_hdl_clear(smb_ofile_t *ofile)
{
	smb_lease_t *lease = ofile->f_lease;

	if (lease != NULL) {
		if (lease->ls_oplock_ofile == ofile) {
			/*
			 * smb2_lease_ofile_close should have
			 * moved the oplock to another ofile.
			 */
			ASSERT(0);
			lease->ls_oplock_ofile = NULL;
		}
	}
	ofile->f_oplock.og_state = 0;
	ofile->f_oplock.og_breaking = 0;
}

/*
 * Called by smb_request_cancel() via sr->cancel_method
 * Arg is the smb_node_t with the breaking oplock.
 */
static void
smb_oplock_wait_break_cancel(smb_request_t *sr)
{
	smb_node_t   *node = sr->cancel_arg2;
	smb_oplock_t *ol;

	SMB_NODE_VALID(node);
	ol = &node->n_oplock;

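	/* Wake the waiter; it re-checks sr_state and sees the cancel. */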
	mutex_enter(&ol->ol_mutex);
	cv_broadcast(&ol->WaitingOpenCV);
	mutex_exit(&ol->ol_mutex);
}

/*
 * Wait up to "timeout" mSec. for the current oplock "breaking" flags
 * to be cleared (by smb_oplock_ack_break or smb_oplock_break_CLOSE).
 *
 * Callers of the above public oplock functions:
 *	smb_oplock_request()
 *	smb_oplock_ack_break()
 *	smb_oplock_break_OPEN() ...
 * check for return status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS
 * and call this function to wait for the break to complete.
 *
 * Most callers should use this default timeout, which they get
 * by passing zero as the timeout arg.  This includes places where
 * we're about to do something that invalidates some cache.
 */
uint32_t
smb_oplock_wait_break(smb_request_t *sr, smb_node_t *node, int timeout)
{
	smb_oplock_t	*ol;
	clock_t		time, rv;
	uint32_t	status = 0;
	smb_req_state_t  srstate;

	SMB_NODE_VALID(node);
	ol = &node->n_oplock;

	if (timeout == 0)
		timeout = smb_oplock_timeout_def;
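	/* Compute the absolute deadline (in ticks) for cv_timedwait(). */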
	time = MSEC_TO_TICK(timeout) + ddi_get_lbolt();

	mutex_enter(&sr->sr_mutex);
	if (sr->sr_state != SMB_REQ_STATE_ACTIVE) {
		mutex_exit(&sr->sr_mutex);
		return (NT_STATUS_CANCELLED);
	}
	sr->sr_state = SMB_REQ_STATE_WAITING_OLBRK;
	sr->cancel_method = smb_oplock_wait_break_cancel;
	sr->cancel_arg2 = node;
	mutex_exit(&sr->sr_mutex);

	mutex_enter(&ol->ol_mutex);
	while ((ol->ol_state & BREAK_ANY) != 0) {
		ol->waiters++;
		rv = cv_timedwait(&ol->WaitingOpenCV,
		    &ol->ol_mutex, time);
		ol->waiters--;
		if (rv < 0) {
			/* cv_timedwait timeout */
			status = NT_STATUS_CANNOT_BREAK_OPLOCK;
			break;
		}

		/*
		 * Check if we were woken by smb_request_cancel,
		 * which sets state SMB_REQ_STATE_CANCEL_PENDING
		 * and signals WaitingOpenCV.
		 */
		mutex_enter(&sr->sr_mutex);
		srstate = sr->sr_state;
		mutex_exit(&sr->sr_mutex);
		if (srstate != SMB_REQ_STATE_WAITING_OLBRK) {
			break;
		}
	}

	mutex_exit(&ol->ol_mutex);

	mutex_enter(&sr->sr_mutex);
	sr->cancel_method = NULL;
	sr->cancel_arg2 = NULL;
	switch (sr->sr_state) {
	case SMB_REQ_STATE_WAITING_OLBRK:
		sr->sr_state = SMB_REQ_STATE_ACTIVE;
		/* status from above */
		break;
	case SMB_REQ_STATE_CANCEL_PENDING:
		sr->sr_state = SMB_REQ_STATE_CANCELLED;
		status = NT_STATUS_CANCELLED;
		break;
	default:
		status = NT_STATUS_INTERNAL_ERROR;
		break;
	}
	mutex_exit(&sr->sr_mutex);

	return (status);
}

/*
 * Simplified version of the above, used in smb_fem.c,
 * without the smb_request_cancel handling.
 */
uint32_t
smb_oplock_wait_break_fem(smb_node_t *node, int timeout)  /* mSec. */
{
	smb_oplock_t	*ol;
	clock_t		time, rv;
	uint32_t	status = 0;

	if (timeout == 0)
		timeout = smb_oplock_timeout_def;

	SMB_NODE_VALID(node);
	ol = &node->n_oplock;

	mutex_enter(&ol->ol_mutex);
	time = MSEC_TO_TICK(timeout) + ddi_get_lbolt();

	while ((ol->ol_state & BREAK_ANY) != 0) {
		ol->waiters++;
		rv = cv_timedwait(&ol->WaitingOpenCV,
		    &ol->ol_mutex, time);
		ol->waiters--;
		if (rv < 0) {
			status = NT_STATUS_CANNOT_BREAK_OPLOCK;
			break;
		}
	}

	mutex_exit(&ol->ol_mutex);

	return (status);
}
805