xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c (revision 20a7641f9918de8574b8b3b47dbe35c4bfc78df1)
1 /*
2  * Copyright (c) 2000-2001 Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
33  */
34 
35 /*
36  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
37  * Use is subject to license terms.
38  *
39  * Portions Copyright (C) 2001 - 2013 Apple Inc. All rights reserved.
40  * Copyright 2019 Nexenta Systems, Inc.  All rights reserved.
41  */
42 
43 #ifdef DEBUG
44 /* See sys/queue.h */
45 #define	QUEUEDEBUG 1
46 #endif
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/atomic.h>
51 #include <sys/proc.h>
52 #include <sys/thread.h>
53 #include <sys/file.h>
54 #include <sys/kmem.h>
55 #include <sys/unistd.h>
56 #include <sys/mount.h>
57 #include <sys/vnode.h>
58 #include <sys/types.h>
59 #include <sys/ddi.h>
60 #include <sys/sunddi.h>
61 #include <sys/stream.h>
62 #include <sys/strsun.h>
63 #include <sys/time.h>
64 #include <sys/class.h>
65 #include <sys/disp.h>
66 #include <sys/cmn_err.h>
67 #include <sys/zone.h>
68 #include <sys/sdt.h>
69 
70 #include <netsmb/smb_osdep.h>
71 
72 #include <netsmb/smb.h>
73 #include <netsmb/smb2.h>
74 #include <netsmb/smb_conn.h>
75 #include <netsmb/smb_rq.h>
76 #include <netsmb/smb2_rq.h>
77 #include <netsmb/smb_subr.h>
78 #include <netsmb/smb_tran.h>
79 #include <netsmb/smb_trantcp.h>
80 
81 /*
82  * SMB messages are up to 64K.  Let's leave room for two.
83  * If we negotiate up to SMB2, increase these. XXX todo
84  */
85 static int smb_tcpsndbuf = 0x20000;
86 static int smb_tcprcvbuf = 0x20000;
87 static int smb_connect_timeout = 10; /* seconds */
88 
89 static int smb1_iod_process(smb_vc_t *, mblk_t *);
90 static int smb2_iod_process(smb_vc_t *, mblk_t *);
91 static int smb_iod_send_echo(smb_vc_t *, cred_t *cr);
92 static int smb_iod_logoff(struct smb_vc *vcp, cred_t *cr);
93 
94 /*
95  * This is set/cleared when smbfs loads/unloads
96  * No locks should be necessary, because smbfs
97  * can't unload until all the mounts are gone.
98  */
99 static smb_fscb_t *fscb;
100 void
101 smb_fscb_set(smb_fscb_t *cb)
102 {
103 	fscb = cb;
104 }
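/*
 * Per the comment above, smbfs is expected to call smb_fscb_set(&cb)
 * with its callback vector at module load and smb_fscb_set(NULL) at
 * unload, so the pointer stays stable while any mount exists.
 */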
105 
106 static void
107 smb_iod_share_disconnected(smb_share_t *ssp)
108 {
109 
110 	smb_share_invalidate(ssp);
111 
112 	/*
113 	 * This is the only fscb hook smbfs currently uses.
114 	 * Replaces smbfs_dead() from Darwin.
115 	 */
116 	if (fscb && fscb->fscb_disconn) {
117 		fscb->fscb_disconn(ssp);
118 	}
119 }
120 
121 /*
122  * State changes are important and infrequent.
123  * Make them easily observable via dtrace.
124  */
125 void
126 smb_iod_newstate(struct smb_vc *vcp, int state)
127 {
128 	vcp->vc_state = state;
129 }
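/*
 * Illustrative only (not part of this module): transitions can be
 * watched from user space with an fbt probe, e.g.
 *	dtrace -n 'fbt::smb_iod_newstate:entry { trace(arg1); }'
 */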
130 
131 /* Lock Held version of the next function. */
132 static inline void
133 smb_iod_rqprocessed_LH(
134 	struct smb_rq *rqp,
135 	int error,
136 	int flags)
137 {
138 	rqp->sr_flags |= flags;
139 	rqp->sr_lerror = error;
140 	rqp->sr_rpgen++;
141 	rqp->sr_state = SMBRQ_NOTIFIED;
142 	cv_broadcast(&rqp->sr_cond);
143 }
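/*
 * Note: sr_rpgen and sr_rplast form the wakeup handshake with
 * smb_iod_waitrq(), which sleeps while sr_rpgen == sr_rplast and
 * bumps sr_rplast once it has consumed a reply.
 */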
144 
145 static void
146 smb_iod_rqprocessed(
147 	struct smb_rq *rqp,
148 	int error,
149 	int flags)
150 {
151 
152 	SMBRQ_LOCK(rqp);
153 	smb_iod_rqprocessed_LH(rqp, error, flags);
154 	SMBRQ_UNLOCK(rqp);
155 }
156 
157 static void
158 smb_iod_invrq(struct smb_vc *vcp)
159 {
160 	struct smb_rq *rqp;
161 
162 	/*
163 	 * Invalidate all outstanding requests for this connection.
164 	 * Also wake up iod_muxwant waiters.
165 	 */
166 	rw_enter(&vcp->iod_rqlock, RW_READER);
167 	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
168 		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
169 	}
170 	rw_exit(&vcp->iod_rqlock);
171 	cv_broadcast(&vcp->iod_muxwait);
172 }
173 
174 /*
175  * Called by smb_vc_rele/smb_vc_kill on last ref, and by
176  * the driver close function if the IOD closes its minor.
177  * In those cases, the caller should be the IOD thread.
178  *
179  * Forcibly kill the connection.
180  */
181 void
182 smb_iod_disconnect(struct smb_vc *vcp)
183 {
184 
185 	/*
186 	 * Inform everyone of the state change.
187 	 */
188 	SMB_VC_LOCK(vcp);
189 	if (vcp->vc_state != SMBIOD_ST_DEAD) {
190 		smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
191 		cv_broadcast(&vcp->vc_statechg);
192 	}
193 	SMB_VC_UNLOCK(vcp);
194 
195 	SMB_TRAN_DISCONNECT(vcp);
196 }
197 
198 /*
199  * Send one request.
200  *
201  * SMB1 only
202  *
203  * Called by _addrq (for internal requests)
204  * and _sendall (via _addrq, _multirq, _waitrq)
205  * Errors are reported via the smb_rq, using:
206  *   smb_iod_rqprocessed(rqp, ...)
207  */
208 static void
209 smb1_iod_sendrq(struct smb_rq *rqp)
210 {
211 	struct smb_vc *vcp = rqp->sr_vc;
212 	mblk_t *m;
213 	int error;
214 
215 	ASSERT(vcp);
216 	ASSERT(RW_WRITE_HELD(&vcp->iod_rqlock));
217 	ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);
218 
219 	/*
220 	 * Internal requests are allowed in any state;
221 	 * otherwise should be active.
222 	 */
223 	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
224 	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
225 		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
226 		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
227 		return;
228 	}
229 
230 	/*
231 	 * Overwrite the SMB header with the assigned MID and
232 	 * (if we're signing) sign it.
233 	 */
234 	smb_rq_fillhdr(rqp);
235 	if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
236 		smb_rq_sign(rqp);
237 	}
238 
239 	/*
240 	 * The transport send consumes the message and we'd
241 	 * prefer to keep a copy, so dupmsg() before sending.
242 	 */
243 	m = dupmsg(rqp->sr_rq.mb_top);
244 	if (m == NULL) {
245 		error = ENOBUFS;
246 		goto fatal;
247 	}
248 
249 #ifdef DTRACE_PROBE2
250 	DTRACE_PROBE2(iod_sendrq,
251 	    (smb_rq_t *), rqp, (mblk_t *), m);
252 #endif
253 
254 	error = SMB_TRAN_SEND(vcp, m);
255 	m = 0; /* consumed by SEND */
256 
257 	rqp->sr_lerror = error;
258 	if (error == 0) {
259 		SMBRQ_LOCK(rqp);
260 		rqp->sr_flags |= SMBR_SENT;
261 		rqp->sr_state = SMBRQ_SENT;
262 		SMBRQ_UNLOCK(rqp);
263 		return;
264 	}
265 	/*
266 	 * Transport send returned an error.
267 	 * Was it a fatal one?
268 	 */
269 	if (SMB_TRAN_FATAL(vcp, error)) {
270 		/*
271 		 * No further attempts should be made
272 		 */
273 	fatal:
274 		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
275 		smb_iod_rqprocessed(rqp, error, SMBR_RESTART);
276 		return;
277 	}
278 }
279 
280 /*
281  * Send one request.
282  *
283  * SMB2 only
284  *
285  * Called by _addrq (for internal requests)
286  * and _sendall (via _addrq, _multirq, _waitrq)
287  * Errors are reported via the smb_rq, using:
288  *   smb_iod_rqprocessed(rqp, ...)
289  */
290 static void
291 smb2_iod_sendrq(struct smb_rq *rqp)
292 {
293 	struct smb_rq *c_rqp;	/* compound */
294 	struct smb_vc *vcp = rqp->sr_vc;
295 	mblk_t *top_m;
296 	mblk_t *cur_m;
297 	int error;
298 
299 	ASSERT(vcp);
300 	ASSERT(RW_WRITE_HELD(&vcp->iod_rqlock));
301 	ASSERT((vcp->vc_flags & SMBV_SMB2) != 0);
302 
303 	/*
304 	 * Internal requests are allowed in any state;
305 	 * otherwise should be active.
306 	 */
307 	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
308 	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
309 		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
310 		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
311 		return;
312 	}
313 
314 	/*
315 	 * Overwrite the SMB header with the assigned MID and
316 	 * (if we're signing) sign it.  If there are compounded
317 	 * requests after the top one, do those too.
318 	 */
319 	smb2_rq_fillhdr(rqp);
320 	if (rqp->sr2_rqflags & SMB2_FLAGS_SIGNED) {
321 		smb2_rq_sign(rqp);
322 	}
323 	c_rqp = rqp->sr2_compound_next;
324 	while (c_rqp != NULL) {
325 		smb2_rq_fillhdr(c_rqp);
326 		if (c_rqp->sr2_rqflags & SMB2_FLAGS_SIGNED) {
327 			smb2_rq_sign(c_rqp);
328 		}
329 		c_rqp = c_rqp->sr2_compound_next;
330 	}
331 
332 	/*
333 	 * The transport send consumes the message and we'd
334 	 * prefer to keep a copy, so dupmsg() before sending.
335 	 * We also need this to build the compound message
336 	 * that we'll actually send.  The message offset at
337 	 * the start of each compounded message should be
338 	 * eight-byte aligned.  The caller preparing the
339  * compounded request has to take care of that
340  * before we get here to sign and send the messages.
341 	 */
342 	top_m = dupmsg(rqp->sr_rq.mb_top);
343 	if (top_m == NULL) {
344 		error = ENOBUFS;
345 		goto fatal;
346 	}
347 	c_rqp = rqp->sr2_compound_next;
348 	while (c_rqp != NULL) {
349 		size_t len = msgdsize(top_m);
350 		ASSERT((len & 7) == 0);
351 		cur_m = dupmsg(c_rqp->sr_rq.mb_top);
352 		if (cur_m == NULL) {
353 			freemsg(top_m);
354 			error = ENOBUFS;
355 			goto fatal;
356 		}
357 		linkb(top_m, cur_m);
		c_rqp = c_rqp->sr2_compound_next; /* advance to the next compounded request */
358 	}
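	/*
	 * top_m is now the entire compound message as one mblk chain:
	 *	[hdr1|body1 (padded to 8)] -> [hdr2|body2 (padded)] -> ...
	 * with each chained request starting at the 8-byte aligned
	 * offset carried in the previous header's NextCommand field.
	 */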
359 
360 	DTRACE_PROBE2(iod_sendrq,
361 	    (smb_rq_t *), rqp, (mblk_t *), top_m);
362 
363 	error = SMB_TRAN_SEND(vcp, top_m);
364 	top_m = 0; /* consumed by SEND */
365 
366 	rqp->sr_lerror = error;
367 	if (error == 0) {
368 		SMBRQ_LOCK(rqp);
369 		rqp->sr_flags |= SMBR_SENT;
370 		rqp->sr_state = SMBRQ_SENT;
371 		SMBRQ_UNLOCK(rqp);
372 		return;
373 	}
374 	/*
375 	 * Transport send returned an error.
376 	 * Was it a fatal one?
377 	 */
378 	if (SMB_TRAN_FATAL(vcp, error)) {
379 		/*
380 		 * No further attempts should be made
381 		 */
382 	fatal:
383 		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
384 		smb_iod_rqprocessed(rqp, error, SMBR_RESTART);
385 		return;
386 	}
387 }
388 
389 /*
390  * Receive one NetBIOS (or NBT over TCP) message.  If none have arrived,
391  * wait up to SMB_NBTIMO (15 sec.) for one to arrive, and then if still
392  * none have arrived, return ETIME.
393  */
394 static int
395 smb_iod_recvmsg(struct smb_vc *vcp, mblk_t **mpp)
396 {
397 	mblk_t *m;
398 	int error;
399 
400 top:
401 	m = NULL;
402 	error = SMB_TRAN_RECV(vcp, &m);
403 	if (error == EAGAIN)
404 		goto top;
405 	if (error)
406 		return (error);
407 	ASSERT(m != NULL);
408 
409 	m = m_pullup(m, 4);
410 	if (m == NULL) {
411 		return (ENOSR);
412 	}
413 
414 	*mpp = m;
415 	return (0);
416 }
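/*
 * Note: SMB_TRAN_RECV is expected to wait at most SMB_NBTIMO
 * (15 sec.) for a message and return ETIME if none arrives; that
 * ETIME is what drives the 15-second "tick" accounting in
 * smb_iod_recvall() below.
 */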
417 
418 /*
419  * How long should we keep around an unused VC (connection)?
420  * There's usually a good chance connections will be reused,
421  * so the default is to keep such connections for 5 min.
422  */
423 #ifdef	DEBUG
424 int smb_iod_idle_keep_time = 60;	/* seconds */
425 #else
426 int smb_iod_idle_keep_time = 300;	/* seconds */
427 #endif
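/*
 * Illustrative tuning example (assumes a live kernel and mdb;
 * not part of this module):
 *	echo 'smb_iod_idle_keep_time/W 0t600' | mdb -kw
 * would keep idle connections around for 10 minutes instead.
 */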
428 
429 /*
430  * Process incoming packets
431  *
432  * This is the "reader" loop, run by the IOD thread.  Normally we're in
433  * state SMBIOD_ST_VCACTIVE here, but during reconnect we're called in
434  * other states with poll==TRUE.
435  *
436  * A non-zero error return here causes the IOD work loop to terminate.
437  */
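/*
 * Timeout ladder while requests are pending, in units of the
 * SMB_NBTIMO (15 sec.) receive timeout: at ~15 sec. log
 * "not responding", at ~30 sec. send an echo, and at ~45 sec.
 * drop the connection and move to state RECONNECT.
 */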
438 int
439 smb_iod_recvall(struct smb_vc *vcp, boolean_t poll)
440 {
441 	mblk_t *m;
442 	int error = 0;
443 	int etime_idle = 0;	/* How many 15 sec. "ticks" idle. */
444 	int etime_count = 0;	/* ... and when we have requests. */
445 
446 	for (;;) {
447 		/*
448 		 * Check whether someone "killed" this VC,
449 		 * or is asking the IOD to terminate.
450 		 */
451 		if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
452 			SMBIODEBUG("SHUTDOWN set\n");
453 			/* This IOD thread will terminate. */
454 			SMB_VC_LOCK(vcp);
455 			smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
456 			cv_broadcast(&vcp->vc_statechg);
457 			SMB_VC_UNLOCK(vcp);
458 			error = EINTR;
459 			break;
460 		}
461 
462 		m = NULL;
463 		error = smb_iod_recvmsg(vcp, &m);
464 
465 		/*
466 		 * Internal requests (reconnecting) call this in a loop
467 		 * (with poll==TRUE) until the request completes.
468 		 */
469 		if (error == ETIME && poll)
470 			break;
471 
472 		if (error == ETIME &&
473 		    vcp->iod_rqlist.tqh_first != NULL) {
474 
475 			/*
476 			 * Nothing received and requests waiting.
477 			 * Increment etime_count.  If we were idle,
478 			 * skip the 1st tick, because we started
479 			 * waiting before there were any requests.
480 			 */
481 			if (etime_idle != 0) {
482 				etime_idle = 0;
483 			} else if (etime_count < INT16_MAX) {
484 				etime_count++;
485 			}
486 
487 			/*
488 			 * ETIME and requests in the queue.
489 			 * The first time this happens (at 15 sec.),
490 			 * log an error (just once).
491 			 */
492 			if (etime_count > 0 &&
493 			    vcp->iod_noresp == B_FALSE) {
494 				vcp->iod_noresp = B_TRUE;
495 				zprintf(vcp->vc_zoneid,
496 				    "SMB server %s not responding\n",
497 				    vcp->vc_srvname);
498 			}
499 			/*
500 			 * At 30 sec. try sending an echo, which
501 			 * should cause some response.
502 			 */
503 			if (etime_count == 2) {
504 				SMBIODEBUG("send echo\n");
505 				(void) smb_iod_send_echo(vcp, CRED());
506 			}
507 			/*
508 			 * At 45 sec. give up on the connection
509 			 * and try to reconnect.
510 			 */
511 			if (etime_count == 3) {
512 				SMB_VC_LOCK(vcp);
513 				smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
514 				SMB_VC_UNLOCK(vcp);
515 				SMB_TRAN_DISCONNECT(vcp);
516 				break;
517 			}
518 			continue;
519 		} /* ETIME and requests in the queue */
520 
521 		if (error == ETIME) {
522 			/*
523 			 * Nothing received and no active requests.
524 			 *
525 			 * If we've received nothing from the server for
526 			 * smb_iod_idle_keep_time seconds, and the IOD
527 			 * thread holds the last reference to this VC,
528 			 * move to state IDLE and drop the TCP session.
529 			 * The IDLE handler will destroy the VC unless
530 			 * vc_state goes to RECONNECT before then.
531 			 */
532 			etime_count = 0;
533 			if (etime_idle < INT16_MAX)
534 				etime_idle++;
535 			if ((etime_idle * SMB_NBTIMO) <
536 			    smb_iod_idle_keep_time)
537 				continue;
538 			SMB_VC_LOCK(vcp);
539 			if (vcp->vc_co.co_usecount == 1) {
540 				smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
541 				SMB_VC_UNLOCK(vcp);
542 				SMBIODEBUG("logoff & disconnect\n");
543 				(void) smb_iod_logoff(vcp, CRED());
544 				SMB_TRAN_DISCONNECT(vcp);
545 				error = 0;
546 				break;
547 			}
548 			SMB_VC_UNLOCK(vcp);
549 			continue;
550 		} /* error == ETIME */
551 
552 		if (error) {
553 			/*
554 			 * The recv above returned an error indicating
555 			 * that our TCP session is no longer usable.
556 			 * Disconnect the session and get ready to
557 			 * reconnect.  If we have pending requests,
558 			 * move to state reconnect immediately;
559 			 * otherwise move to state IDLE until a
560 			 * request is issued on this VC.
561 			 */
562 			SMB_VC_LOCK(vcp);
563 			if (vcp->iod_rqlist.tqh_first != NULL)
564 				smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
565 			else
566 				smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
567 			cv_broadcast(&vcp->vc_statechg);
568 			SMB_VC_UNLOCK(vcp);
569 			SMB_TRAN_DISCONNECT(vcp);
570 			break;
571 		}
572 
573 		/*
574 		 * Received something.  Yea!
575 		 */
576 		etime_count = 0;
577 		etime_idle = 0;
578 
579 		/*
580 		 * If we just completed a reconnect after logging
581 		 * "SMB server %s not responding" then log OK now.
582 		 */
583 		if (vcp->iod_noresp) {
584 			vcp->iod_noresp = B_FALSE;
585 			zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
586 			    vcp->vc_srvname);
587 		}
588 
589 		if ((vcp->vc_flags & SMBV_SMB2) != 0) {
590 			error = smb2_iod_process(vcp, m);
591 		} else {
592 			error = smb1_iod_process(vcp, m);
593 		}
594 
595 		/*
596 		 * Reconnect calls this in a loop with poll=TRUE
597 		 * We've received a response, so break now.
598 		 */
599 		if (poll) {
600 			error = 0;
601 			break;
602 		}
603 	}
604 
605 	return (error);
606 }
607 
608 /*
609  * Have what should be an SMB1 reply.  Check and parse the header,
610  * then use the message ID to find the request this belongs to and
611  * post it on that request.
612  *
613  * Returns an error if the reader should give up.
614  * To be safe, error if we read garbage.
615  */
616 static int
617 smb1_iod_process(smb_vc_t *vcp, mblk_t *m)
618 {
619 	struct mdchain md;
620 	struct smb_rq *rqp;
621 	uint8_t cmd, sig[4];
622 	uint16_t mid;
623 	int err, skip;
624 
625 	m = m_pullup(m, SMB_HDRLEN);
626 	if (m == NULL)
627 		return (ENOMEM);
628 
629 	/*
630 	 * Note: Intentionally do NOT md_done(&md)
631 	 * because that would free the message and
632 	 * we just want to peek here.
633 	 */
634 	md_initm(&md, m);
635 
636 	/*
637 	 * Check the SMB header version and get the MID.
638 	 *
639 	 * The header version should be SMB1 except when we're
640 	 * doing SMB1-to-SMB2 negotiation, in which case we may
641 	 * see an SMB2 header with message ID=0 (only allowed in
642  * vc_state == SMBIOD_ST_CONNECTED -- negotiating).
643 	 */
644 	err = md_get_mem(&md, sig, 4, MB_MSYSTEM);
645 	if (err)
646 		return (err);
647 	if (sig[1] != 'S' || sig[2] != 'M' || sig[3] != 'B') {
648 		goto bad_hdr;
649 	}
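	/*
	 * SMB1 header layout ([MS-CIFS]): signature (4 bytes, read
	 * above), command (1), status (4), flags (1), flags2 (2),
	 * etc., with the MID at byte offset SMB_HDR_OFF_MID (30);
	 * hence the skip below from offset 5 (signature + command).
	 */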
650 	switch (sig[0]) {
651 	case SMB_HDR_V1:	/* SMB1 */
652 		md_get_uint8(&md, &cmd);
653 		/* Skip to and get the MID. At offset 5 now. */
654 		skip = SMB_HDR_OFF_MID - 5;
655 		md_get_mem(&md, NULL, skip, MB_MSYSTEM);
656 		err = md_get_uint16le(&md, &mid);
657 		if (err)
658 			return (err);
659 		break;
660 	case SMB_HDR_V2:	/* SMB2+ */
661 		if (vcp->vc_state == SMBIOD_ST_CONNECTED) {
662 			/*
663 			 * No need to look, can only be
664 			 * MID=0, cmd=negotiate
665 			 */
666 			cmd = SMB_COM_NEGOTIATE;
667 			mid = 0;
668 			break;
669 		}
670 		/* FALLTHROUGH */
671 	bad_hdr:
672 	default:
673 		SMBIODEBUG("Bad SMB hdr\n");
674 		m_freem(m);
675 		return (EPROTO);
676 	}
677 
678 	/*
679 	 * Find the request and post the reply
680 	 */
681 	rw_enter(&vcp->iod_rqlock, RW_READER);
682 	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
683 
684 		if (rqp->sr_mid != mid)
685 			continue;
686 
687 		DTRACE_PROBE2(iod_post_reply,
688 		    (smb_rq_t *), rqp, (mblk_t *), m);
689 		m_dumpm(m);
690 
691 		SMBRQ_LOCK(rqp);
692 		if (rqp->sr_rp.md_top == NULL) {
693 			md_initm(&rqp->sr_rp, m);
694 		} else {
695 			if (rqp->sr_flags & SMBR_MULTIPACKET) {
696 				md_append_record(&rqp->sr_rp, m);
697 			} else {
698 				SMBRQ_UNLOCK(rqp);
699 				rqp = NULL;
700 				break;
701 			}
702 		}
703 		smb_iod_rqprocessed_LH(rqp, 0, 0);
704 		SMBRQ_UNLOCK(rqp);
705 		break;
706 	}
707 	rw_exit(&vcp->iod_rqlock);
708 
709 	if (rqp == NULL) {
710 		if (cmd != SMB_COM_ECHO) {
711 			SMBSDEBUG("drop resp: MID 0x%04x\n", (uint_t)mid);
712 		}
713 		m_freem(m);
714 		/*
715 		 * Keep going.  It's possible this reply came
716 		 * after the request timed out and went away.
717 		 */
718 	}
719 	return (0);
720 }
721 
722 /*
723  * Have what should be an SMB2 reply.  Check and parse the header,
724  * then use the message ID to find the request this belongs to and
725  * post it on that request.
726  *
727  * We also want to apply any credit grant in this reply now,
728  * rather than waiting for the owner to wake up.
729  */
730 static int
731 smb2_iod_process(smb_vc_t *vcp, mblk_t *m)
732 {
733 	struct mdchain md;
734 	struct smb_rq *rqp;
735 	uint8_t sig[4];
736 	mblk_t *next_m = NULL;
737 	uint64_t message_id, async_id;
738 	uint32_t flags, next_cmd_off, status;
739 	uint16_t command, credits_granted;
740 	int err;
741 
742 top:
743 	m = m_pullup(m, SMB2_HDRLEN);
744 	if (m == NULL)
745 		return (ENOMEM);
746 
747 	/*
748 	 * Note: Intentionally do NOT md_done(&md)
749 	 * because that would free the message and
750 	 * we just want to peek here.
751 	 */
752 	md_initm(&md, m);
753 
754 	/*
755 	 * Check the SMB header.  Must be SMB2
756 	 * (and later, could be SMB3 encrypted)
757 	 */
758 	err = md_get_mem(&md, sig, 4, MB_MSYSTEM);
759 	if (err)
760 		return (err);
761 	if (sig[1] != 'S' || sig[2] != 'M' || sig[3] != 'B') {
762 		goto bad_hdr;
763 	}
764 	switch (sig[0]) {
765 	case SMB_HDR_V2:
766 		break;
767 	case SMB_HDR_V3E:
768 		/*
769 		 * Todo: If encryption enabled, decrypt the message
770 		 * and restart processing on the cleartext.
771 		 */
772 		/* FALLTHROUGH */
773 	bad_hdr:
774 	default:
775 		SMBIODEBUG("Bad SMB2 hdr\n");
776 		m_freem(m);
777 		return (EPROTO);
778 	}
779 
780 	/*
781 	 * Parse the rest of the SMB2 header,
782 	 * skipping what we don't need.
783 	 */
784 	md_get_uint32le(&md, NULL);	/* length, credit_charge */
785 	md_get_uint32le(&md, &status);
786 	md_get_uint16le(&md, &command);
787 	md_get_uint16le(&md, &credits_granted);
788 	md_get_uint32le(&md, &flags);
789 	md_get_uint32le(&md, &next_cmd_off);
790 	md_get_uint64le(&md, &message_id);
791 	if (flags & SMB2_FLAGS_ASYNC_COMMAND) {
792 		md_get_uint64le(&md, &async_id);
793 	} else {
794 		/* PID, TID (not needed) */
795 		async_id = 0;
796 	}
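	/*
	 * The fields parsed above correspond to the 64-byte SMB2
	 * header of [MS-SMB2]: StructureSize + CreditCharge, Status,
	 * Command, CreditResponse, Flags, NextCommand, MessageId,
	 * then either AsyncId (async) or Reserved + TreeId (sync).
	 */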
797 
798 	/*
799 	 * If this is a compound reply, split it.
800 	 * Next must be 8-byte aligned.
801 	 */
802 	if (next_cmd_off != 0) {
803 		if ((next_cmd_off & 7) != 0)
804 			SMBIODEBUG("Misaligned next cmd\n");
805 		else
806 			next_m = m_split(m, next_cmd_off, 1);
807 	}
808 
809 	/*
810 	 * SMB2 Negotiate may return zero credits_granted,
811 	 * in which case we should assume it granted one.
812 	 */
813 	if (command == SMB2_NEGOTIATE && credits_granted == 0)
814 		credits_granted = 1;
815 
816 	/*
817 	 * Apply the credit grant
818 	 */
819 	rw_enter(&vcp->iod_rqlock, RW_WRITER);
820 	vcp->vc2_limit_message_id += credits_granted;
821 
822 	/*
823 	 * Find the request and post the reply
824 	 */
825 	rw_downgrade(&vcp->iod_rqlock);
826 	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
827 
828 		if (rqp->sr2_messageid != message_id)
829 			continue;
830 
831 		DTRACE_PROBE2(iod_post_reply,
832 		    (smb_rq_t *), rqp, (mblk_t *), m);
833 		m_dumpm(m);
834 
835 		/*
836 		 * If this is an interim response, just save the
837 	 * async ID but don't wake up the request.
838 		 * Don't need SMBRQ_LOCK for this.
839 		 */
840 		if (status == NT_STATUS_PENDING && async_id != 0) {
841 			rqp->sr2_rspasyncid = async_id;
842 			m_freem(m);
843 			break;
844 		}
845 
846 		SMBRQ_LOCK(rqp);
847 		if (rqp->sr_rp.md_top == NULL) {
848 			md_initm(&rqp->sr_rp, m);
849 		} else {
850 			SMBRQ_UNLOCK(rqp);
851 			rqp = NULL;
852 			break;
853 		}
854 		smb_iod_rqprocessed_LH(rqp, 0, 0);
855 		SMBRQ_UNLOCK(rqp);
856 		break;
857 	}
858 	rw_exit(&vcp->iod_rqlock);
859 
860 	if (rqp == NULL) {
861 		if (command != SMB2_ECHO) {
862 			SMBSDEBUG("drop resp: MID %lld\n",
863 			    (long long)message_id);
864 		}
865 		m_freem(m);
866 		/*
867 		 * Keep going.  It's possible this reply came
868 		 * after the request timed out and went away.
869 		 */
870 	}
871 
872 	/*
873 	 * If we split a compound reply, continue with the
874 	 * next part of the compound.
875 	 */
876 	if (next_m != NULL) {
877 		m = next_m;
878 		goto top;
879 	}
880 
881 	return (0);
882 }
883 
884 /*
885  * The IOD receiver thread has requests pending and
886  * has not received anything in a while.  Try to
887  * send an SMB echo request.  It's tricky to do a
888  * send from the IOD thread because we can't block.
889  *
890  * Using tmo=SMBNOREPLYWAIT in the request
891  * so smb_rq_reply will skip smb_iod_waitrq.
892  * The smb_smb_echo call uses SMBR_INTERNAL
893  * to avoid calling smb_iod_sendall().
894  */
895 static int
896 smb_iod_send_echo(smb_vc_t *vcp, cred_t *cr)
897 {
898 	smb_cred_t scred;
899 	int err, tmo = SMBNOREPLYWAIT;
900 
901 	ASSERT(vcp->iod_thr == curthread);
902 
903 	smb_credinit(&scred, cr);
904 	if ((vcp->vc_flags & SMBV_SMB2) != 0) {
905 		err = smb2_smb_echo(vcp, &scred, tmo);
906 	} else {
907 		err = smb_smb_echo(vcp, &scred, tmo);
908 	}
909 	smb_credrele(&scred);
910 	return (err);
911 }
912 
913 /*
914  * Helper for smb1_iod_addrq, smb2_iod_addrq
915  * Returns zero if interrupted, else 1.
916  */
917 static int
918 smb_iod_muxwait(smb_vc_t *vcp, boolean_t sig_ok)
919 {
920 	int rc;
921 
922 	SMB_VC_LOCK(vcp);
923 	vcp->iod_muxwant++;
924 	if (sig_ok) {
925 		rc = cv_wait_sig(&vcp->iod_muxwait, &vcp->vc_lock);
926 	} else {
927 		cv_wait(&vcp->iod_muxwait, &vcp->vc_lock);
928 		rc = 1;
929 	}
930 	vcp->iod_muxwant--;
931 	SMB_VC_UNLOCK(vcp);
932 
933 	return (rc);
934 }
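/*
 * Callers use the pattern: drop iod_rqlock, call smb_iod_muxwait(),
 * then re-take the lock and re-check the limit; see the "recheck"
 * loops in smb1_iod_addrq() and smb2_iod_addrq() below.
 */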
935 
936 /*
937  * Place request in the queue, and send it.
938  * Called with no locks held.
939  *
940  * Called for SMB1 only
941  *
942  * The logic for how we limit active requests differs between
943  * SMB1 and SMB2.  With SMB1 it's a simple counter iod_muxcnt.
944  */
945 int
946 smb1_iod_addrq(struct smb_rq *rqp)
947 {
948 	struct smb_vc *vcp = rqp->sr_vc;
949 	uint16_t need;
950 	boolean_t sig_ok =
951 	    (rqp->sr_flags & SMBR_NOINTR_SEND) == 0;
952 
953 	ASSERT(rqp->sr_cred);
954 	ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);
955 
956 	rqp->sr_owner = curthread;
957 
958 	rw_enter(&vcp->iod_rqlock, RW_WRITER);
959 
960 recheck:
961 	/*
962 	 * Internal requests can be added in any state,
963 	 * but normal requests only in state active.
964 	 */
965 	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
966 	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
967 		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
968 		rw_exit(&vcp->iod_rqlock);
969 		return (ENOTCONN);
970 	}
971 
972 	/*
973 	 * If we're at the limit of active requests, block until
974 	 * enough requests complete so we can make ours active.
975 	 * Wakeup in smb_iod_removerq().
976 	 *
977 	 * Normal callers leave one slot free, so internal
978 	 * callers can have the last slot if needed.
979 	 */
980 	need = 1;
981 	if ((rqp->sr_flags & SMBR_INTERNAL) == 0)
982 		need++;
983 	if ((vcp->iod_muxcnt + need) > vcp->vc_maxmux) {
984 		rw_exit(&vcp->iod_rqlock);
985 		if (rqp->sr_flags & SMBR_INTERNAL)
986 			return (EBUSY);
987 		if (smb_iod_muxwait(vcp, sig_ok) == 0)
988 			return (EINTR);
989 		rw_enter(&vcp->iod_rqlock, RW_WRITER);
990 		goto recheck;
991 	}
992 
993 	/*
994 	 * Add this request to the active list and send it.
995 	 * (SMB1 path: requests are never compounded here; see
996 	 * smb2_iod_addrq for the compound-aware SMB2 variant.)
998 	 */
999 	rqp->sr_mid = vcp->vc_next_mid++;
1000 	/* If signing, set the signing sequence numbers. */
1001 	if (vcp->vc_mackey != NULL && (rqp->sr_rqflags2 &
1002 	    SMB_FLAGS2_SECURITY_SIGNATURE) != 0) {
1003 		rqp->sr_seqno = vcp->vc_next_seq++;
1004 		rqp->sr_rseqno = vcp->vc_next_seq++;
1005 	}
1006 	vcp->iod_muxcnt++;
1007 	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
1008 	smb1_iod_sendrq(rqp);
1009 
1010 	rw_exit(&vcp->iod_rqlock);
1011 	return (0);
1012 }
1013 
1014 /*
1015  * Place request in the queue, and send it.
1016  * Called with no locks held.
1017  *
1018  * Called for SMB2 only.
1019  *
1020  * With SMB2 we have a range of valid message IDs, and we may
1021  * only send requests when we can assign a message ID within
1022  * the valid range.  We may need to wait here for some active
1023  * request to finish (and update vc2_limit_message_id) before
1024  * we can get message IDs for our new request(s).  Another
1025  * difference is that the request sequence we're waiting to
1026  * add here may require multiple message IDs, due either to
1027  * compounding or to multi-credit requests.  Therefore
1028  * we need to wait for the availability of however many
1029  * message IDs are required by our request sequence.
1030  */
1031 int
1032 smb2_iod_addrq(struct smb_rq *rqp)
1033 {
1034 	struct smb_vc *vcp = rqp->sr_vc;
1035 	struct smb_rq *c_rqp;	/* compound req */
1036 	uint16_t charge;
1037 	boolean_t sig_ok =
1038 	    (rqp->sr_flags & SMBR_NOINTR_SEND) == 0;
1039 
1040 	ASSERT(rqp->sr_cred != NULL);
1041 	ASSERT((vcp->vc_flags & SMBV_SMB2) != 0);
1042 
1043 	/*
1044 	 * Figure out the credit charges
1045 	 * No multi-credit messages yet.
1046 	 */
1047 	rqp->sr2_totalcreditcharge = rqp->sr2_creditcharge;
1048 	c_rqp = rqp->sr2_compound_next;
1049 	while (c_rqp != NULL) {
1050 		rqp->sr2_totalcreditcharge += c_rqp->sr2_creditcharge;
1051 		c_rqp = c_rqp->sr2_compound_next;
1052 	}
1053 
1054 	/*
1055 	 * Internal requests must not be compounded
1056 	 * and should use exactly one credit.
1057 	 */
1058 	if (rqp->sr_flags & SMBR_INTERNAL) {
1059 		if (rqp->sr2_compound_next != NULL) {
1060 			ASSERT(0);
1061 			return (EINVAL);
1062 		}
1063 	}
1064 
1065 	rqp->sr_owner = curthread;
1066 
1067 	rw_enter(&vcp->iod_rqlock, RW_WRITER);
1068 
1069 recheck:
1070 	/*
1071 	 * Internal requests can be added in any state,
1072 	 * but normal requests only in state active.
1073 	 */
1074 	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
1075 	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
1076 		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
1077 		rw_exit(&vcp->iod_rqlock);
1078 		return (ENOTCONN);
1079 	}
1080 
1081 	/*
1082 	 * If we're at the limit of active requests, block until
1083 	 * enough requests complete so we can make ours active.
1084 	 * Wakeup in smb_iod_removerq().
1085 	 *
1086 	 * Normal callers leave one slot free, so internal
1087 	 * callers can have the last slot if needed.
1088 	 */
1089 	charge = rqp->sr2_totalcreditcharge;
1090 	if ((rqp->sr_flags & SMBR_INTERNAL) == 0)
1091 		charge++;
1092 	if ((vcp->vc2_next_message_id + charge) >
1093 	    vcp->vc2_limit_message_id) {
1094 		rw_exit(&vcp->iod_rqlock);
1095 		if (rqp->sr_flags & SMBR_INTERNAL)
1096 			return (EBUSY);
1097 		if (smb_iod_muxwait(vcp, sig_ok) == 0)
1098 			return (EINTR);
1099 		rw_enter(&vcp->iod_rqlock, RW_WRITER);
1100 		goto recheck;
1101 	}
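	/*
	 * Worked example: with vc2_next_message_id == 5 and
	 * vc2_limit_message_id == 10, a normal (non-internal) request
	 * whose total credit charge is 4 needs charge + 1 == 5 IDs
	 * and may proceed (5 + 5 <= 10); a total charge of 5 would
	 * block above until more credits are granted.
	 */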
1102 
1103 	/*
1104 	 * Add this request to the active list and send it.
1105 	 * For SMB2 we may have a sequence of compounded
1106 	 * requests, in which case we must add them all.
1107 	 * They're sent as a compound in smb2_iod_sendrq.
1108 	 */
1109 
1110 	rqp->sr2_messageid = vcp->vc2_next_message_id;
1111 	vcp->vc2_next_message_id += rqp->sr2_creditcharge;
1112 	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
1113 
1114 	c_rqp = rqp->sr2_compound_next;
1115 	while (c_rqp != NULL) {
1116 		c_rqp->sr2_messageid = vcp->vc2_next_message_id;
1117 		vcp->vc2_next_message_id += c_rqp->sr2_creditcharge;
1118 		TAILQ_INSERT_TAIL(&vcp->iod_rqlist, c_rqp, sr_link);
1119 		c_rqp = c_rqp->sr2_compound_next;
1120 	}
1121 	smb2_iod_sendrq(rqp);
1122 
1123 	rw_exit(&vcp->iod_rqlock);
1124 	return (0);
1125 }
1126 
1127 /*
1128  * Mark an SMBR_MULTIPACKET request as
1129  * needing another send.  Similar to the
1130  * "normal" part of smb1_iod_addrq.
1131  * Only used by SMB1
1132  */
1133 int
1134 smb1_iod_multirq(struct smb_rq *rqp)
1135 {
1136 	struct smb_vc *vcp = rqp->sr_vc;
1137 
1138 	ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);
1139 
1140 	if (vcp->vc_flags & SMBV_SMB2) {
1141 		ASSERT(!"SMB2?");
1142 		return (EINVAL);
1143 	}
1144 
1145 	if (rqp->sr_flags & SMBR_INTERNAL)
1146 		return (EINVAL);
1147 
1148 	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
1149 		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
1150 		return (ENOTCONN);
1151 	}
1152 
1153 	rw_enter(&vcp->iod_rqlock, RW_WRITER);
1154 
1155 	/* Already on iod_rqlist, just reset state. */
1156 	rqp->sr_state = SMBRQ_NOTSENT;
1157 	smb1_iod_sendrq(rqp);
1158 
1159 	rw_exit(&vcp->iod_rqlock);
1160 
1161 	return (0);
1162 }
1163 
1164 /*
1165  * Remove a request from the active list, and
1166  * wake up requests waiting to go active.
1167  *
1168  * Shared by SMB1 + SMB2
1169  *
1170  * The logic for how we limit active requests differs between
1171  * SMB1 and SMB2.  With SMB1 it's a simple counter iod_muxcnt.
1172  * With SMB2 we have a range of valid message IDs, and when we
1173  * retire the oldest request we need to keep track of what is
1174  * now the oldest message ID.  In both cases, after we take a
1175  * request out of the list here, we should be able to wake up
1176  * a request waiting to get in the active list.
1177  */
1178 void
1179 smb_iod_removerq(struct smb_rq *rqp)
1180 {
1181 	struct smb_rq *rqp2;
1182 	struct smb_vc *vcp = rqp->sr_vc;
1183 	boolean_t was_head = B_FALSE;
1184 
1185 	rw_enter(&vcp->iod_rqlock, RW_WRITER);
1186 
1187 #ifdef QUEUEDEBUG
1188 	/*
1189 	 * Make sure we have not already removed it.
1190 	 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
1191 	 * XXX: Don't like the constant 1 here...
1192 	 */
1193 	ASSERT(rqp->sr_link.tqe_next != (void *)1L);
1194 #endif
1195 
1196 	if (TAILQ_FIRST(&vcp->iod_rqlist) == rqp)
1197 		was_head = B_TRUE;
1198 	TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
1199 	if (vcp->vc_flags & SMBV_SMB2) {
1200 		rqp2 = TAILQ_FIRST(&vcp->iod_rqlist);
1201 		if (was_head && rqp2 != NULL) {
1202 			/* Do we still need this? */
1203 			vcp->vc2_oldest_message_id =
1204 			    rqp2->sr2_messageid;
1205 		}
1206 	} else {
1207 		ASSERT(vcp->iod_muxcnt > 0);
1208 		vcp->iod_muxcnt--;
1209 	}
1210 
1211 	rw_exit(&vcp->iod_rqlock);
1212 
1213 	/*
1214 	 * If there are requests waiting for "mux" slots,
1215 	 * wake one.
1216 	 */
1217 	SMB_VC_LOCK(vcp);
1218 	if (vcp->iod_muxwant != 0)
1219 		cv_signal(&vcp->iod_muxwait);
1220 	SMB_VC_UNLOCK(vcp);
1221 }
1222 
1223 /*
1224  * Wait for a request to complete.
1225  */
1226 int
1227 smb_iod_waitrq(struct smb_rq *rqp)
1228 {
1229 	struct smb_vc *vcp = rqp->sr_vc;
1230 	clock_t tr, tmo1, tmo2;
1231 	int error;
1232 
1233 	if (rqp->sr_flags & SMBR_INTERNAL) {
1234 		/* XXX - Do we ever take this path now? */
1235 		return (smb_iod_waitrq_int(rqp));
1236 	}
1237 
1238 	/*
1239 	 * Make sure this is NOT the IOD thread,
1240 	 * or the wait below will stop the reader.
1241 	 */
1242 	ASSERT(curthread != vcp->iod_thr);
1243 
1244 	SMBRQ_LOCK(rqp);
1245 
1246 	/*
1247 	 * The request has been sent.  Now wait for the response,
1248 	 * with the timeout specified for this request.
1249 	 * Compute all the deadlines now, so we effectively
1250 	 * start the timer(s) after the request is sent.
1251 	 */
1252 	if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
1253 		tmo1 = SEC_TO_TICK(smb_timo_notice);
1254 	else
1255 		tmo1 = 0;
1256 	tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);
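	/*
	 * Note: tmo1 is a relative interval (used with cv_reltimedwait
	 * below) while tmo2 is an absolute lbolt deadline (used with
	 * cv_timedwait), so the total wait stays bounded by sr_timo
	 * no matter how many intermediate wakeups occur.
	 */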
1257 
1258 	/*
1259 	 * As above, we don't want to allow interrupt for some
1260 	 * requests like open, because we could miss a successful
1261 	 * response and therefore "leak" a FID.  Such requests
1262 	 * are marked SMBR_NOINTR_RECV to prevent that.
1263 	 *
1264 	 * If "slow server" warnings are enabled, wait first
1265 	 * for the "notice" timeout, and warn if expired.
1266 	 */
1267 	if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
1268 		if (rqp->sr_flags & SMBR_NOINTR_RECV)
1269 			tr = cv_reltimedwait(&rqp->sr_cond,
1270 			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
1271 		else
1272 			tr = cv_reltimedwait_sig(&rqp->sr_cond,
1273 			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
1274 		if (tr == 0) {
1275 			error = EINTR;
1276 			goto out;
1277 		}
1278 		if (tr < 0) {
1279 			DTRACE_PROBE1(smb_iod_waitrq1,
1280 			    (smb_rq_t *), rqp);
1281 		}
1282 	}
1283 
1284 	/*
1285 	 * Keep waiting until tmo2 expires.
1286 	 */
1287 	while (rqp->sr_rpgen == rqp->sr_rplast) {
1288 		if (rqp->sr_flags & SMBR_NOINTR_RECV)
1289 			tr = cv_timedwait(&rqp->sr_cond,
1290 			    &rqp->sr_lock, tmo2);
1291 		else
1292 			tr = cv_timedwait_sig(&rqp->sr_cond,
1293 			    &rqp->sr_lock, tmo2);
1294 		if (tr == 0) {
1295 			error = EINTR;
1296 			goto out;
1297 		}
1298 		if (tr < 0) {
1299 			DTRACE_PROBE1(smb_iod_waitrq2,
1300 			    (smb_rq_t *), rqp);
1301 			error = ETIME;
1302 			goto out;
1303 		}
1304 		/* got wakeup */
1305 	}
1306 	error = rqp->sr_lerror;
1307 	rqp->sr_rplast++;
1308 
1309 out:
1310 	SMBRQ_UNLOCK(rqp);
1311 
1312 	/*
1313 	 * MULTIPACKET request must stay in the list.
1314 	 * They may need additional responses.
1315 	 */
1316 	if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
1317 		smb_iod_removerq(rqp);
1318 
1319 	return (error);
1320 }
1321 
1322 /*
1323  * Internal variant of smb_iod_waitrq(), for use in
1324  * requests run by the IOD (reader) thread itself.
1325  * Block only long enough to receive one reply.
1326  */
1327 int
1328 smb_iod_waitrq_int(struct smb_rq *rqp)
1329 {
1330 	struct smb_vc *vcp = rqp->sr_vc;
1331 	int timeleft = rqp->sr_timo;
1332 	int error;
1333 
1334 	ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
1335 again:
1336 	error = smb_iod_recvall(vcp, B_TRUE);
1337 	if (error == ETIME) {
1338 		/* We waited SMB_NBTIMO sec. */
1339 		timeleft -= SMB_NBTIMO;
1340 		if (timeleft > 0)
1341 			goto again;
1342 	}
1343 
1344 	smb_iod_removerq(rqp);
1345 	if (rqp->sr_state != SMBRQ_NOTIFIED)
1346 		error = ETIME;
1347 
1348 	return (error);
1349 }
1350 
1351 /*
1352  * Shutdown all outstanding I/O requests on the specified share with
1353  * EIO; used when unmounting a share.  (There shouldn't be any for a
1354  * non-forced unmount; if this is a forced unmount, we have to shut down
1355  * the requests as part of the unmount process.)
1356  */
1357 void
1358 smb_iod_shutdown_share(struct smb_share *ssp)
1359 {
1360 	struct smb_vc *vcp = SSTOVC(ssp);
1361 	struct smb_rq *rqp;
1362 
1363 	/*
1364 	 * Loop through the list of requests and shutdown the ones
1365 	 * that are for the specified share.
1366 	 */
1367 	rw_enter(&vcp->iod_rqlock, RW_READER);
1368 	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
1369 		if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
1370 			smb_iod_rqprocessed(rqp, EIO, 0);
1371 	}
1372 	rw_exit(&vcp->iod_rqlock);
1373 }
1374 
1375 /*
1376  * Ioctl functions called by the user-level I/O Daemon (IOD)
1377  * to bring up and service a connection to some SMB server.
1378  */
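/*
 * Overall VC state progression driven by these ioctls, as
 * implemented below:
 *
 *	RECONNECT --connect--> CONNECTED --negotiate--> NEGOTIATED
 *	NEGOTIATED or AUTHCONT --ssnsetup--> AUTHOK/AUTHCONT/AUTHFAIL
 *	AUTHOK --work--> VCACTIVE --(reader exits)--> IDLE/RECONNECT
 *	IDLE --reconnect--> RECONNECT; rcfail --> RCFAILED --> IDLE
 */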
1379 
1380 /*
1381  * Handle ioctl SMBIOC_IOD_CONNECT
1382  */
1383 int
1384 nsmb_iod_connect(struct smb_vc *vcp, cred_t *cr)
1385 {
1386 	int err, val;
1387 
1388 	ASSERT(vcp->iod_thr == curthread);
1389 
1390 	if (vcp->vc_state != SMBIOD_ST_RECONNECT) {
1391 		cmn_err(CE_NOTE, "iod_connect: bad state %d", vcp->vc_state);
1392 		return (EINVAL);
1393 	}
1394 
1395 	/*
1396 	 * Putting a TLI endpoint back in the right state for a new
1397 	 * connection is a bit tricky.  In theory, this could be:
1398 	 *	SMB_TRAN_DISCONNECT(vcp);
1399 	 *	SMB_TRAN_UNBIND(vcp);
1400 	 * but that method often results in TOUTSTATE errors.
1401 	 * It's easier to just close it and open a new endpoint.
1402 	 */
1403 	SMB_VC_LOCK(vcp);
1404 	if (vcp->vc_tdata)
1405 		SMB_TRAN_DONE(vcp);
1406 	err = SMB_TRAN_CREATE(vcp, cr);
1407 	SMB_VC_UNLOCK(vcp);
1408 	if (err != 0)
1409 		return (err);
1410 
1411 	/*
1412 	 * Set various options on this endpoint.
1413 	 * Keep going in spite of errors.
1414 	 */
1415 	val = smb_tcpsndbuf;
1416 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_SNDBUF, &val);
1417 	if (err != 0) {
1418 		cmn_err(CE_NOTE, "iod_connect: setopt SNDBUF, err=%d", err);
1419 	}
1420 	val = smb_tcprcvbuf;
1421 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_RCVBUF, &val);
1422 	if (err != 0) {
1423 		cmn_err(CE_NOTE, "iod_connect: setopt RCVBUF, err=%d", err);
1424 	}
1425 	val = 1;
1426 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_KEEPALIVE, &val);
1427 	if (err != 0) {
1428 		cmn_err(CE_NOTE, "iod_connect: setopt KEEPALIVE, err=%d", err);
1429 	}
1430 	val = 1;
1431 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_TCP_NODELAY, &val);
1432 	if (err != 0) {
1433 		cmn_err(CE_NOTE, "iod_connect: setopt TCP_NODELAY err=%d", err);
1434 	}
1435 	val = smb_connect_timeout * 1000;
1436 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_TCP_CON_TMO, &val);
1437 	if (err != 0) {
1438 		cmn_err(CE_NOTE, "iod_connect: setopt TCP con tmo err=%d", err);
1439 	}
1440 
1441 	/*
1442 	 * Bind and connect
1443 	 */
1444 	err = SMB_TRAN_BIND(vcp, NULL);
1445 	if (err != 0) {
1446 		cmn_err(CE_NOTE, "iod_connect: t_kbind: err=%d", err);
1447 		/* Continue on and try connect. */
1448 	}
1449 	err = SMB_TRAN_CONNECT(vcp, &vcp->vc_srvaddr.sa);
1450 	/*
1451 	 * No cmn_err here, as connect failures are normal, i.e.
1452 	 * when a server has multiple addresses and only some are
1453 	 * routed for us. (libsmbfs tries them all)
1454 	 */
1455 	if (err == 0) {
1456 		SMB_VC_LOCK(vcp);
1457 		smb_iod_newstate(vcp, SMBIOD_ST_CONNECTED);
1458 		SMB_VC_UNLOCK(vcp);
1459 	} /* else stay in state reconnect */
1460 
1461 	return (err);
1462 }
1463 
1464 /*
1465  * Handle ioctl SMBIOC_IOD_NEGOTIATE
1466  * Do the whole SMB1/SMB2 negotiate
1467  *
1468  * This is where we send our first request to the server.
1469  * If this is the first time we're talking to this server,
1470  * (meaning not a reconnect) then we don't know whether
1471  * the server supports SMB2, so we need to use the weird
1472  * SMB1-to-SMB2 negotiation. That's where we send an SMB1
1473  * negotiate including dialect "SMB 2.???" and if the
1474  * server supports SMB2 we get an SMB2 reply -- Yes, an
1475  * SMB2 reply to an SMB1 request.  A strange protocol...
1476  *
1477  * If on the other hand we already know the server supports
1478  * SMB2 (because this is a reconnect) or if the client side
1479  * has disabled SMB1 entirely, we'll skip the SMB1 part.
1480  */
1481 int
1482 nsmb_iod_negotiate(struct smb_vc *vcp, cred_t *cr)
1483 {
1484 	struct smb_sopt *sv = &vcp->vc_sopt;
1485 	smb_cred_t scred;
1486 	int err = 0;
1487 
1488 	ASSERT(vcp->iod_thr == curthread);
1489 
1490 	smb_credinit(&scred, cr);
1491 
1492 	if (vcp->vc_state != SMBIOD_ST_CONNECTED) {
1493 		cmn_err(CE_NOTE, "iod_negotiate: bad state %d", vcp->vc_state);
1494 		err = EINVAL;
1495 		goto out;
1496 	}
1497 
1498 	if (vcp->vc_maxver == 0 || vcp->vc_minver > vcp->vc_maxver) {
1499 		err = EINVAL;
1500 		goto out;
1501 	}
1502 
1503 	/*
1504 	 * (Re)init negotiated values
1505 	 */
1506 	bzero(sv, sizeof (*sv));
1507 	vcp->vc2_next_message_id = 0;
1508 	vcp->vc2_limit_message_id = 1;
1509 	vcp->vc2_session_id = 0;
1510 	vcp->vc_next_seq = 0;
1511 
1512 	/*
1513 	 * If this was reconnect, get rid of the old MAC key
1514 	 * and session key.
1515 	 */
1516 	SMB_VC_LOCK(vcp);
1517 	if (vcp->vc_mackey != NULL) {
1518 		kmem_free(vcp->vc_mackey, vcp->vc_mackeylen);
1519 		vcp->vc_mackey = NULL;
1520 		vcp->vc_mackeylen = 0;
1521 	}
1522 	if (vcp->vc_ssnkey != NULL) {
1523 		kmem_free(vcp->vc_ssnkey, vcp->vc_ssnkeylen);
1524 		vcp->vc_ssnkey = NULL;
1525 		vcp->vc_ssnkeylen = 0;
1526 	}
1527 	SMB_VC_UNLOCK(vcp);
1528 
1529 	/*
1530 	 * If this is not an SMB2 reconnect (SMBV_SMB2 not set),
1531 	 * and if SMB1 is enabled, do SMB1 negotiate.  Then
1532 	 * if either SMB1-to-SMB2 negotiate tells us we should
1533 	 * switch to SMB2, or the local configuration has
1534 	 * disabled SMB1, set the SMBV_SMB2 flag.
1535 	 *
1536 	 * Note that vc_maxver is handled in smb_smb_negotiate
1537 	 * so we never get sv_proto == SMB_DIALECT_SMB2_FF when
1538 	 * the local configuration disables SMB2, and therefore
1539 	 * we won't set the SMBV_SMB2 flag.
1540 	 */
1541 	if ((vcp->vc_flags & SMBV_SMB2) == 0) {
1542 		if (vcp->vc_minver < SMB2_DIALECT_BASE) {
1543 			/*
1544 			 * SMB1 is enabled
1545 			 */
1546 			err = smb_smb_negotiate(vcp, &scred);
1547 			if (err != 0)
1548 				goto out;
1549 		}
1550 		/*
1551 		 * If SMB1-to-SMB2 negotiate told us we should
1552 		 * switch to SMB2, or if the local configuration
1553 		 * disables SMB1, set the SMB2 flag.
1554 		 */
1555 		if (sv->sv_proto == SMB_DIALECT_SMB2_FF ||
1556 		    vcp->vc_minver >= SMB2_DIALECT_BASE) {
1557 			/*
1558 			 * Switch this VC to SMB2.
1559 			 */
1560 			SMB_VC_LOCK(vcp);
1561 			vcp->vc_flags |= SMBV_SMB2;
1562 			SMB_VC_UNLOCK(vcp);
1563 		}
1564 	}
1565 
1566 	/*
1567 	 * If this is an SMB2 reconnect (SMBV_SMB2 was set before this
1568 	 * function was called), or SMB1-to-SMB2 negotiate indicated
1569 	 * we should switch to SMB2, or we have SMB1 disabled (both
1570 	 * cases set SMBV_SMB2 above), then do SMB2 negotiate.
1571 	 */
1572 	if ((vcp->vc_flags & SMBV_SMB2) != 0) {
1573 		err = smb2_smb_negotiate(vcp, &scred);
1574 	}
1575 
1576 out:
1577 	if (err == 0) {
1578 		SMB_VC_LOCK(vcp);
1579 		smb_iod_newstate(vcp, SMBIOD_ST_NEGOTIATED);
1580 		SMB_VC_UNLOCK(vcp);
1581 	}
1582 	/*
1583 	 * (else) leave state as it was.
1584 	 * User-level will either close this handle (if connecting
1585 	 * for the first time) or call rcfail and then try again.
1586 	 */
1587 
1588 	smb_credrele(&scred);
1589 
1590 	return (err);
1591 }
1592 
1593 /*
1594  * Handle ioctl SMBIOC_IOD_SSNSETUP
1595  * Do either SMB1 or SMB2 session setup (one call/reply)
1596  */
1597 int
1598 nsmb_iod_ssnsetup(struct smb_vc *vcp, cred_t *cr)
1599 {
1600 	smb_cred_t scred;
1601 	int err;
1602 
1603 	ASSERT(vcp->iod_thr == curthread);
1604 
1605 	switch (vcp->vc_state) {
1606 	case SMBIOD_ST_NEGOTIATED:
1607 	case SMBIOD_ST_AUTHCONT:
1608 		break;
1609 	default:
1610 		return (EINVAL);
1611 	}
1612 
1613 	smb_credinit(&scred, cr);
1614 	if (vcp->vc_flags & SMBV_SMB2)
1615 		err = smb2_smb_ssnsetup(vcp, &scred);
1616 	else
1617 		err = smb_smb_ssnsetup(vcp, &scred);
1618 	smb_credrele(&scred);
1619 
1620 	SMB_VC_LOCK(vcp);
1621 	switch (err) {
1622 	case 0:
1623 		smb_iod_newstate(vcp, SMBIOD_ST_AUTHOK);
1624 		break;
1625 	case EINPROGRESS:	/* MORE_PROCESSING_REQUIRED */
1626 		smb_iod_newstate(vcp, SMBIOD_ST_AUTHCONT);
1627 		break;
1628 	default:
1629 		smb_iod_newstate(vcp, SMBIOD_ST_AUTHFAIL);
1630 		break;
1631 	}
1632 	SMB_VC_UNLOCK(vcp);
1633 
1634 	return (err);
1635 }
1636 
1637 static int
1638 smb_iod_logoff(struct smb_vc *vcp, cred_t *cr)
1639 {
1640 	smb_cred_t scred;
1641 	int err;
1642 
1643 	ASSERT(vcp->iod_thr == curthread);
1644 
1645 	smb_credinit(&scred, cr);
1646 	if (vcp->vc_flags & SMBV_SMB2)
1647 		err = smb2_smb_logoff(vcp, &scred);
1648 	else
1649 		err = smb_smb_logoff(vcp, &scred);
1650 	smb_credrele(&scred);
1651 
1652 	return (err);
1653 }
1654 
1655 /*
1656  * Handle ioctl SMBIOC_IOD_WORK
1657  *
1658  * The smbiod agent calls this after authentication to become
1659  * the reader for this session, so long as that's possible.
1660  * This should only return non-zero if we want that agent to
1661  * give up on this VC permanently.
1662  */
1663 /* ARGSUSED */
1664 int
1665 smb_iod_vc_work(struct smb_vc *vcp, int flags, cred_t *cr)
1666 {
1667 	smbioc_ssn_work_t *wk = &vcp->vc_work;
1668 	int err = 0;
1669 
1670 	/*
1671 	 * This is called by the one-and-only
1672 	 * IOD thread for this VC.
1673 	 */
1674 	ASSERT(vcp->iod_thr == curthread);
1675 
1676 	/*
1677 	 * Should be in state...
1678 	 */
1679 	if (vcp->vc_state != SMBIOD_ST_AUTHOK) {
1680 		cmn_err(CE_NOTE, "iod_vc_work: bad state %d", vcp->vc_state);
1681 		return (EINVAL);
1682 	}
1683 
1684 	/*
1685 	 * Update the session key and initialize SMB signing.
1686 	 *
1687 	 * This implementation does not use multiple SMB sessions per
1688 	 * TCP connection (where only the first session key is used)
1689 	 * so we always have a new session key here.  Sanity check the
1690 	 * length from user space.  Normally 16 or 32.
1691 	 */
1692 	if (wk->wk_u_ssnkey_len > 1024) {
1693 		cmn_err(CE_NOTE, "iod_vc_work: ssn key too long");
1694 		return (EINVAL);
1695 	}
1696 
1697 	ASSERT(vcp->vc_ssnkey == NULL);
1698 	SMB_VC_LOCK(vcp);
1699 	if (wk->wk_u_ssnkey_len != 0 &&
1700 	    wk->wk_u_ssnkey_buf.lp_ptr != NULL) {
1701 		vcp->vc_ssnkeylen = wk->wk_u_ssnkey_len;
1702 		vcp->vc_ssnkey = kmem_alloc(vcp->vc_ssnkeylen, KM_SLEEP);
1703 		if (ddi_copyin(wk->wk_u_ssnkey_buf.lp_ptr,
1704 		    vcp->vc_ssnkey, vcp->vc_ssnkeylen, flags) != 0) {
1705 			err = EFAULT;
1706 		}
1707 	}
1708 	SMB_VC_UNLOCK(vcp);
1709 	if (err)
1710 		return (err);
1711 
1712 	/*
1713 	 * If we have a session key, derive the MAC key for SMB signing.
1714 	 * If this was a NULL session, we might have no session key.
1715 	 */
1716 	ASSERT(vcp->vc_mackey == NULL);
1717 	if (vcp->vc_ssnkey != NULL) {
1718 		if (vcp->vc_flags & SMBV_SMB2)
1719 			err = smb2_sign_init(vcp);
1720 		else
1721 			err = smb_sign_init(vcp);
1722 		if (err != 0)
1723 			return (err);
1724 	}
1725 
1726 	/*
1727 	 * Tell any enqueued requests they can start.
1728 	 */
1729 	SMB_VC_LOCK(vcp);
1730 	vcp->vc_genid++;	/* possibly new connection */
1731 	smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
1732 	cv_broadcast(&vcp->vc_statechg);
1733 	SMB_VC_UNLOCK(vcp);
1734 
1735 	/*
1736 	 * The above cv_broadcast should be sufficient to
1737 	 * get requests going again.
1738 	 *
1739 	 * If we have a callback function, run it.
1740 	 * Was: smb_iod_notify_connected()
1741 	 */
1742 	if (fscb && fscb->fscb_connect)
1743 		smb_vc_walkshares(vcp, fscb->fscb_connect);
1744 
1745 	/*
1746 	 * Run the "reader" loop.  An error return here is normal
1747 	 * (i.e. when we need to reconnect) so ignore errors.
1748 	 * Note: This call updates the vc_state.
1749 	 */
1750 	(void) smb_iod_recvall(vcp, B_FALSE);
1751 
1752 	/*
1753 	 * The reader loop returned, so we must have a
1754 	 * new state.  (disconnected or reconnecting)
1755 	 *
1756 	 * Notify shares of the disconnect.
1757 	 * Was: smb_iod_notify_disconnect()
1758 	 */
1759 	smb_vc_walkshares(vcp, smb_iod_share_disconnected);
1760 
1761 	/*
1762 	 * The reader loop function returns only when
1763 	 * there's been an error on the connection, or
1764 	 * this VC has no more references.  It also
1765 	 * updates the state before it returns.
1766 	 *
1767 	 * Tell any requests to give up or restart.
1768 	 */
1769 	smb_iod_invrq(vcp);
1770 
1771 	return (err);
1772 }
1773 
1774 /*
1775  * Handle ioctl SMBIOC_IOD_IDLE
1776  *
1777  * Wait around for someone to ask to use this VC again after the
1778  * TCP session has closed.  When one of the connected trees adds a
1779  * request, smb_iod_reconnect will set vc_state to RECONNECT and
1780  * wake this cv_wait.  When a VC ref. goes away in smb_vc_rele,
1781  * that also signals this wait so we can re-check whether we
1782  * now hold the last ref. on this VC (and can destroy it).
1783  */
1784 int
1785 smb_iod_vc_idle(struct smb_vc *vcp)
1786 {
1787 	int err = 0;
1788 	boolean_t destroy = B_FALSE;
1789 
1790 	/*
1791 	 * This is called by the one-and-only
1792 	 * IOD thread for this VC.
1793 	 */
1794 	ASSERT(vcp->iod_thr == curthread);
1795 
1796 	/*
1797 	 * Should be in state...
1798 	 */
1799 	if (vcp->vc_state != SMBIOD_ST_IDLE &&
1800 	    vcp->vc_state != SMBIOD_ST_RECONNECT) {
1801 		cmn_err(CE_NOTE, "iod_vc_idle: bad state %d", vcp->vc_state);
1802 		return (EINVAL);
1803 	}
1804 
1805 	SMB_VC_LOCK(vcp);
1806 
1807 	while (vcp->vc_state == SMBIOD_ST_IDLE &&
1808 	    vcp->vc_co.co_usecount > 1) {
1809 		if (cv_wait_sig(&vcp->iod_idle, &vcp->vc_lock) == 0) {
1810 			err = EINTR;
1811 			break;
1812 		}
1813 	}
1814 	if (vcp->vc_state == SMBIOD_ST_IDLE &&
1815 	    vcp->vc_co.co_usecount == 1) {
1816 		/*
1817 		 * We were woken because we now have the last ref.
1818 		 * Arrange for this VC to be destroyed now.
1819 		 * Set the "GONE" flag while holding the lock,
1820 		 * to prevent a race with new references.
1821 		 * The destroy happens after unlock.
1822 		 */
1823 		vcp->vc_flags |= SMBV_GONE;
1824 		destroy = B_TRUE;
1825 	}
1826 
1827 	SMB_VC_UNLOCK(vcp);
1828 
1829 	if (destroy) {
1830 		/* This sets vc_state = DEAD */
1831 		smb_iod_disconnect(vcp);
1832 	}
1833 
1834 	return (err);
1835 }
1836 
1837 /*
1838  * Handle ioctl SMBIOC_IOD_RCFAIL
1839  *
1840  * After a failed reconnect attempt, smbiod will
1841  * call this to make current requests error out.
1842  */
1843 int
1844 smb_iod_vc_rcfail(struct smb_vc *vcp)
1845 {
1846 	clock_t tr;
1847 	int err = 0;
1848 
1849 	/*
1850 	 * This is called by the one-and-only
1851 	 * IOD thread for this VC.
1852 	 */
1853 	ASSERT(vcp->iod_thr == curthread);
1854 	SMB_VC_LOCK(vcp);
1855 
1856 	smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
1857 	cv_broadcast(&vcp->vc_statechg);
1858 
1859 	/*
1860 	 * Short wait here for two reasons:
1861 	 * (1) Give requests a chance to error out.
1862 	 * (2) Prevent immediate retry.
1863 	 */
1864 	tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
1865 	    SEC_TO_TICK(5), TR_CLOCK_TICK);
1866 	if (tr == 0)
1867 		err = EINTR;
1868 
1869 	/*
1870 	 * Normally we'll switch to state IDLE here.  However,
1871 	 * if something called smb_iod_reconnect() while we were
1872 	 * waiting above, we'll be in state RECONNECT already.
1873 	 * In that case, keep state RECONNECT, so we essentially
1874 	 * skip transition through state IDLE that would normally
1875 	 * happen next.
1876 	 */
1877 	if (vcp->vc_state != SMBIOD_ST_RECONNECT) {
1878 		smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
1879 		cv_broadcast(&vcp->vc_statechg);
1880 	}
1881 
1882 	SMB_VC_UNLOCK(vcp);
1883 
1884 	return (err);
1885 }
1886 
1887 /*
1888  * Ask the IOD to reconnect (if not already underway)
1889  * then wait for the reconnect to finish.
1890  */
1891 int
1892 smb_iod_reconnect(struct smb_vc *vcp)
1893 {
1894 	int err = 0, rv;
1895 
1896 	SMB_VC_LOCK(vcp);
1897 again:
1898 	switch (vcp->vc_state) {
1899 
1900 	case SMBIOD_ST_IDLE:
1901 		/* Tell the IOD thread it's no longer IDLE. */
1902 		smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
1903 		cv_signal(&vcp->iod_idle);
1904 		/* FALLTHROUGH */
1905 
1906 	case SMBIOD_ST_RECONNECT:
1907 	case SMBIOD_ST_CONNECTED:
1908 	case SMBIOD_ST_NEGOTIATED:
1909 	case SMBIOD_ST_AUTHCONT:
1910 	case SMBIOD_ST_AUTHOK:
1911 		/* Wait for the VC state to become ACTIVE. */
1912 		rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock);
1913 		if (rv == 0) {
1914 			err = EINTR;
1915 			break;
1916 		}
1917 		goto again;
1918 
1919 	case SMBIOD_ST_VCACTIVE:
1920 		err = 0; /* success! */
1921 		break;
1922 
1923 	case SMBIOD_ST_AUTHFAIL:
1924 	case SMBIOD_ST_RCFAILED:
1925 	case SMBIOD_ST_DEAD:
1926 	default:
1927 		err = ENOTCONN;
1928 		break;
1929 	}
1930 
1931 	SMB_VC_UNLOCK(vcp);
1932 	return (err);
1933 }
1934