/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 * Portions Copyright (C) 2001 - 2013 Apple Inc. All rights reserved.
 * Copyright 2019 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2024 RackTop Systems, Inc.
 */

#ifdef DEBUG
/* See sys/queue.h */
#define	QUEUEDEBUG 1
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/unistd.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/time.h>
#include <sys/class.h>
#include <sys/disp.h>
#include <sys/cmn_err.h>
#include <sys/zone.h>
#include <sys/sdt.h>

#include <netsmb/smb_osdep.h>

#include <netsmb/smb.h>
#include <netsmb/smb2.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb2_rq.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>

/*
 * SMB messages are up to 64K. Let's leave room for two.
 * If we negotiate up to SMB2, increase these. XXX todo
 */
static int smb_tcpsndbuf = 0x20000;
static int smb_tcprcvbuf = 0x20000;
static int smb_connect_timeout = 10; /* seconds */

static int smb1_iod_process(smb_vc_t *, mblk_t *);
static int smb2_iod_process(smb_vc_t *, mblk_t *);
static int smb_iod_send_echo(smb_vc_t *, cred_t *cr);
static int smb_iod_logoff(struct smb_vc *vcp, cred_t *cr);

/*
 * This is set/cleared when smbfs loads/unloads
 * No locks should be necessary, because smbfs
 * can't unload until all the mounts are gone.
 */
static smb_fscb_t *fscb;
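/*
 * Called by smbfs when it loads (cb != NULL) and again
 * when it unloads (cb == NULL).
 */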
void
smb_fscb_set(smb_fscb_t *cb)
{
	fscb = cb;
}

static void
smb_iod_share_disconnected(smb_share_t *ssp)
{

	smb_share_invalidate(ssp);

	/*
	 * This is the only fscb hook smbfs currently uses.
	 * Replaces smbfs_dead() from Darwin.
	 */
	if (fscb && fscb->fscb_disconn) {
		fscb->fscb_disconn(ssp);
	}
}

/*
 * State changes are important and infrequent.
 * Make them easily observable via dtrace.
 */
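/*
 * For example (a sketch, assuming the fbt provider):
 *	dtrace -n 'fbt::smb_iod_newstate:entry { trace(arg1); }'
 */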
void
smb_iod_newstate(struct smb_vc *vcp, int state)
{
	vcp->vc_state = state;
}

/* Lock Held version of the next function. */
static inline void
smb_iod_rqprocessed_LH(
	struct smb_rq *rqp,
	int error,
	int flags)
{
	rqp->sr_flags |= flags;
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	cv_broadcast(&rqp->sr_cond);
}

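/*
 * Post an error or completion on a request and wake
 * anyone blocked on it in smb_iod_waitrq.
 */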
static void
smb_iod_rqprocessed(
	struct smb_rq *rqp,
	int error,
	int flags)
{

	SMBRQ_LOCK(rqp);
	smb_iod_rqprocessed_LH(rqp, error, flags);
	SMBRQ_UNLOCK(rqp);
}

static void
smb_iod_invrq(struct smb_vc *vcp)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 * Also wakeup iod_muxwant waiters.
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
	}
	rw_exit(&vcp->iod_rqlock);
	cv_broadcast(&vcp->iod_muxwait);
}

/*
 * Called by smb_vc_rele/smb_vc_kill on last ref, and by
 * the driver close function if the IOD closes its minor.
 * In those cases, the caller should be the IOD thread.
 *
 * Forcibly kill the connection.
 */
void
smb_iod_disconnect(struct smb_vc *vcp)
{

	/*
	 * Inform everyone of the state change.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->vc_state != SMBIOD_ST_DEAD) {
		smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
		cv_broadcast(&vcp->vc_statechg);
	}
	SMB_VC_UNLOCK(vcp);

	SMB_TRAN_DISCONNECT(vcp);
}

/*
 * Send one request.
 *
 * SMB1 only
 *
 * Called by _addrq (for internal requests)
 * and _sendall (via _addrq, _multirq, _waitrq)
 * Errors are reported via the smb_rq, using:
 *   smb_iod_rqprocessed(rqp, ...)
 */
static void
smb1_iod_sendrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	mblk_t *m;
	int error;

	ASSERT(vcp);
	ASSERT(RW_WRITE_HELD(&vcp->iod_rqlock));
	ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);

	/*
	 * Internal requests are allowed in any state;
	 * otherwise should be active.
	 */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
		return;
	}

	/*
	 * Overwrite the SMB header with the assigned MID and
	 * (if we're signing) sign it.
	 */
	smb_rq_fillhdr(rqp);
	if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
		smb_rq_sign(rqp);
	}

	/*
	 * The transport send consumes the message and we'd
	 * prefer to keep a copy, so dupmsg() before sending.
	 */
	m = dupmsg(rqp->sr_rq.mb_top);
	if (m == NULL) {
		error = ENOBUFS;
		goto fatal;
	}

#ifdef DTRACE_PROBE2
	DTRACE_PROBE2(iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), m);
#endif

	error = SMB_TRAN_SEND(vcp, m);
	m = 0; /* consumed by SEND */

	rqp->sr_lerror = error;
	if (error == 0) {
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		SMBRQ_UNLOCK(rqp);
		return;
	}
	/*
	 * Transport send returned an error.
	 * Was it a fatal one?
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
	fatal:
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		smb_iod_rqprocessed(rqp, error, SMBR_RESTART);
		return;
	}
}

/*
 * Send one request.
 *
 * SMB2 only
 *
 * Called by _addrq (for internal requests)
 * and _sendall (via _addrq, _multirq, _waitrq)
 * Errors are reported via the smb_rq, using:
 *   smb_iod_rqprocessed(rqp, ...)
 */
static void
smb2_iod_sendrq(struct smb_rq *rqp)
{
	struct smb_rq *c_rqp;	/* compound */
	struct smb_vc *vcp = rqp->sr_vc;
	struct smb_sopt *sv = &vcp->vc_sopt;
	mblk_t *top_m;
	mblk_t *cur_m;
	int error;
	boolean_t encrypt = B_FALSE;

	ASSERT(vcp);
	ASSERT(RW_WRITE_HELD(&vcp->iod_rqlock));
	ASSERT((vcp->vc_flags & SMBV_SMB2) != 0);

	/*
	 * Internal requests are allowed in any state;
	 * otherwise should be active.
	 */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
		return;
	}

	/* Determine if outgoing request(s) must be encrypted */
	if ((sv->sv2_sessflags & SMB2_SESSION_FLAG_ENCRYPT_DATA) != 0) {
		if (rqp->sr2_command != SMB2_NEGOTIATE) {
			encrypt = B_TRUE;
		}
	} else if (rqp->sr_share != NULL &&
	    (rqp->sr_share->ss2_share_flags &
	    SMB2_SHAREFLAG_ENCRYPT_DATA) != 0) {
		if ((rqp->sr2_command != SMB2_NEGOTIATE) &&
		    (rqp->sr2_command != SMB2_SESSION_SETUP) &&
		    (rqp->sr2_command != SMB2_TREE_CONNECT)) {
			encrypt = B_TRUE;
		}
	}

	/*
	 * Overwrite the SMB header with the assigned MID and
	 * (if we're signing) sign it. If there are compounded
	 * requests after the top one, do those too.
	 */
	smb2_rq_fillhdr(rqp);
	if (!encrypt && (rqp->sr2_rqflags & SMB2_FLAGS_SIGNED) != 0) {
		smb2_rq_sign(rqp);
	}
	c_rqp = rqp->sr2_compound_next;
	while (c_rqp != NULL) {
		smb2_rq_fillhdr(c_rqp);
		if (!encrypt &&
		    (c_rqp->sr2_rqflags & SMB2_FLAGS_SIGNED) != 0) {
			smb2_rq_sign(c_rqp);
		}
		c_rqp = c_rqp->sr2_compound_next;
	}

	/*
	 * Want the dtrace probe to expose the clear data,
	 * not the encrypted data.
	 */
	DTRACE_PROBE2(iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), rqp->sr_rq.mb_top);

	/*
	 * The transport send consumes the message and we'd
	 * prefer to keep a copy, so dupmsg() before sending.
	 * We also need this to build the compound message
	 * that we'll actually send. The message offset at
	 * the start of each compounded message should be
	 * eight-byte aligned. The caller preparing the
	 * compounded request has to take care of that
	 * before we get here and sign messages etc.
	 *
	 * If we're encrypting, copy instead, and then
	 * encrypt the copy in-place.
	 */
	if (encrypt)
		top_m = copymsg(rqp->sr_rq.mb_top);
	else
		top_m = dupmsg(rqp->sr_rq.mb_top);
	if (top_m == NULL) {
		error = ENOBUFS;
		goto fatal;
	}
	c_rqp = rqp->sr2_compound_next;
	while (c_rqp != NULL) {
		size_t len = msgdsize(top_m);
		ASSERT((len & 7) == 0);
		if (encrypt)
			cur_m = copymsg(c_rqp->sr_rq.mb_top);
		else
			cur_m = dupmsg(c_rqp->sr_rq.mb_top);
		if (cur_m == NULL) {
			freemsg(top_m);
			error = ENOBUFS;
			goto fatal;
		}
		linkb(top_m, cur_m);
		c_rqp = c_rqp->sr2_compound_next;
	}

	if (encrypt) {
		error = smb3_msg_encrypt(vcp, &top_m);
		if (error != 0)
			goto fatal;
	}

	error = SMB_TRAN_SEND(vcp, top_m);
	top_m = 0; /* consumed by SEND */

	rqp->sr_lerror = error;
	if (error == 0) {
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		SMBRQ_UNLOCK(rqp);
		return;
	}
	/*
	 * Transport send returned an error.
	 * Was it a fatal one?
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
	fatal:
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		smb_iod_rqprocessed(rqp, error, SMBR_RESTART);
		return;
	}
}

/*
 * Receive one NetBIOS (or NBT over TCP) message. If none have arrived,
 * wait up to SMB_NBTIMO (15 sec.) for one to arrive, and then if still
 * none have arrived, return ETIME.
 */
static int
smb_iod_recvmsg(struct smb_vc *vcp, mblk_t **mpp)
{
	mblk_t *m;
	int error;

top:
	m = NULL;
	error = SMB_TRAN_RECV(vcp, &m);
	if (error == EAGAIN)
		goto top;
	if (error)
		return (error);
	ASSERT(m != NULL);

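	/* Make the first four bytes (the SMB signature) contiguous. */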
	m = m_pullup(m, 4);
	if (m == NULL) {
		return (ENOSR);
	}

	*mpp = m;
	return (0);
}

/*
 * How long should we keep around an unused VC (connection)?
 * There's usually a good chance connections will be reused,
 * so the default is to keep such connections for 5 min.
 */
#ifdef DEBUG
int smb_iod_idle_keep_time = 60;	/* seconds */
#else
int smb_iod_idle_keep_time = 300;	/* seconds */
#endif
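/*
 * A sketch (assuming the nsmb module name): this can be tuned
 * in /etc/system with a line like:
 *	set nsmb:smb_iod_idle_keep_time = 600
 */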

/*
 * Process incoming packets
 *
 * This is the "reader" loop, run by the IOD thread. Normally we're in
 * state SMBIOD_ST_VCACTIVE here, but during reconnect we're called in
 * other states with poll==TRUE.
 *
 * A non-zero error return here causes the IOD work loop to terminate.
 */
int
smb_iod_recvall(struct smb_vc *vcp, boolean_t poll)
{
	mblk_t *m;
	int error = 0;
	int etime_idle = 0;	/* How many 15 sec. "ticks" idle. */
	int etime_count = 0;	/* ... and when we have requests. */

	for (;;) {
		/*
		 * Check whether someone "killed" this VC,
		 * or is asking the IOD to terminate.
		 */
		if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
			SMBIODEBUG("SHUTDOWN set\n");
			/* This IOD thread will terminate. */
			SMB_VC_LOCK(vcp);
			smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			error = EINTR;
			break;
		}

		m = NULL;
		error = smb_iod_recvmsg(vcp, &m);

		/*
		 * Internal requests (reconnecting) call this in a loop
		 * (with poll==TRUE) until the request completes.
		 */
		if (error == ETIME && poll)
			break;

		if (error == ETIME &&
		    vcp->iod_rqlist.tqh_first != NULL) {

			/*
			 * Nothing received and requests waiting.
			 * Increment etime_count. If we were idle,
			 * skip the 1st tick, because we started
			 * waiting before there were any requests.
			 */
			if (etime_idle != 0) {
				etime_idle = 0;
			} else if (etime_count < INT16_MAX) {
				etime_count++;
			}

			/*
			 * ETIME and requests in the queue.
			 * The first time (at 15 sec.)
			 * Log an error (just once).
			 */
			if (etime_count > 0 &&
			    vcp->iod_noresp == B_FALSE) {
				vcp->iod_noresp = B_TRUE;
				zprintf(vcp->vc_zoneid,
				    "SMB server %s not responding\n",
				    vcp->vc_srvname);
			}
			/*
			 * At 30 sec. try sending an echo, which
			 * should cause some response.
			 */
			if (etime_count == 2) {
				SMBIODEBUG("send echo\n");
				(void) smb_iod_send_echo(vcp, CRED());
			}
			/*
			 * At 45 sec. give up on the connection
			 * and try to reconnect.
			 */
			if (etime_count == 3) {
				SMB_VC_LOCK(vcp);
				smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
				SMB_VC_UNLOCK(vcp);
				SMB_TRAN_DISCONNECT(vcp);
				break;
			}
			continue;
		} /* ETIME and requests in the queue */

		if (error == ETIME) {
			/*
			 * Nothing received and no active requests.
			 *
			 * If we've received nothing from the server for
			 * smb_iod_idle_keep_time seconds, and the IOD
			 * thread holds the last reference to this VC,
			 * move to state IDLE and drop the TCP session.
			 * The IDLE handler will destroy the VC unless
			 * vc_state goes to RECONNECT before then.
			 */
			etime_count = 0;
			if (etime_idle < INT16_MAX)
				etime_idle++;
			if ((etime_idle * SMB_NBTIMO) <
			    smb_iod_idle_keep_time)
				continue;
			SMB_VC_LOCK(vcp);
			if (vcp->vc_co.co_usecount == 1) {
				smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
				SMB_VC_UNLOCK(vcp);
				SMBIODEBUG("logoff & disconnect\n");
				(void) smb_iod_logoff(vcp, CRED());
				SMB_TRAN_DISCONNECT(vcp);
				error = 0;
				break;
			}
			SMB_VC_UNLOCK(vcp);
			continue;
		} /* error == ETIME */

		if (error) {
			/*
			 * The recv above returned an error indicating
			 * that our TCP session is no longer usable.
			 * Disconnect the session and get ready to
			 * reconnect. If we have pending requests,
			 * move to state reconnect immediately;
			 * otherwise move to state IDLE until a
			 * request is issued on this VC.
			 */
			SMB_VC_LOCK(vcp);
			if (vcp->iod_rqlist.tqh_first != NULL)
				smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
			else
				smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			SMB_TRAN_DISCONNECT(vcp);
			break;
		}

		/*
		 * Received something. Yea!
		 */
		etime_count = 0;
		etime_idle = 0;

		/*
		 * If we just completed a reconnect after logging
		 * "SMB server %s not responding" then log OK now.
		 */
		if (vcp->iod_noresp) {
			vcp->iod_noresp = B_FALSE;
			zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
			    vcp->vc_srvname);
		}

		if ((vcp->vc_flags & SMBV_SMB2) != 0) {
			error = smb2_iod_process(vcp, m);
		} else {
			error = smb1_iod_process(vcp, m);
		}

		/*
		 * Reconnect calls this in a loop with poll=TRUE
		 * We've received a response, so break now.
		 */
		if (poll) {
			error = 0;
			break;
		}
	}

	return (error);
}

/*
 * Have what should be an SMB1 reply. Check and parse the header,
 * then use the message ID to find the request this belongs to and
 * post it on that request.
 *
 * Returns an error if the reader should give up.
 * To be safe, error if we read garbage.
 */
static int
smb1_iod_process(smb_vc_t *vcp, mblk_t *m)
{
	struct mdchain md;
	struct smb_rq *rqp;
	uint8_t cmd, sig[4];
	uint16_t mid;
	int err, skip;

	m = m_pullup(m, SMB_HDRLEN);
	if (m == NULL)
		return (ENOMEM);

	/*
	 * Note: Intentionally do NOT md_done(&md)
	 * because that would free the message and
	 * we just want to peek here.
	 */
	md_initm(&md, m);

	/*
	 * Check the SMB header version and get the MID.
	 *
	 * The header version should be SMB1 except when we're
	 * doing SMB1-to-SMB2 negotiation, in which case we may
	 * see an SMB2 header with message ID=0 (only allowed in
	 * vc_state == SMBIOD_ST_CONNECTED -- negotiating).
	 */
	err = md_get_mem(&md, sig, 4, MB_MSYSTEM);
	if (err)
		return (err);
	if (sig[1] != 'S' || sig[2] != 'M' || sig[3] != 'B') {
		goto bad_hdr;
	}
	switch (sig[0]) {
	case SMB_HDR_V1: /* SMB1 */
		md_get_uint8(&md, &cmd);
		/* Skip to and get the MID. At offset 5 now. */
		skip = SMB_HDR_OFF_MID - 5;
		md_get_mem(&md, NULL, skip, MB_MSYSTEM);
		err = md_get_uint16le(&md, &mid);
		if (err)
			return (err);
		break;
	case SMB_HDR_V2: /* SMB2+ */
		if (vcp->vc_state == SMBIOD_ST_CONNECTED) {
			/*
			 * No need to look, can only be
			 * MID=0, cmd=negotiate
			 */
			cmd = SMB_COM_NEGOTIATE;
			mid = 0;
			break;
		}
		/* FALLTHROUGH */
	bad_hdr:
	default:
		SMBIODEBUG("Bad SMB hdr\n");
		m_freem(m);
		return (EPROTO);
	}

	/*
	 * Find the request and post the reply
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		if (rqp->sr_mid != mid)
			continue;

		DTRACE_PROBE2(iod_post_reply,
		    (smb_rq_t *), rqp, (mblk_t *), m);
		m_dumpm(m);

		SMBRQ_LOCK(rqp);
		if (rqp->sr_rp.md_top == NULL) {
			md_initm(&rqp->sr_rp, m);
		} else {
			if (rqp->sr_flags & SMBR_MULTIPACKET) {
				md_append_record(&rqp->sr_rp, m);
			} else {
				SMBRQ_UNLOCK(rqp);
				rqp = NULL;
				break;
			}
		}
		smb_iod_rqprocessed_LH(rqp, 0, 0);
		SMBRQ_UNLOCK(rqp);
		break;
	}
	rw_exit(&vcp->iod_rqlock);

	if (rqp == NULL) {
		if (cmd != SMB_COM_ECHO) {
			SMBSDEBUG("drop resp: MID 0x%04x\n", (uint_t)mid);
		}
		m_freem(m);
		/*
		 * Keep going. It's possible this reply came
		 * after the request timed out and went away.
		 */
	}
	return (0);
}

/*
 * Have what should be an SMB2 reply. Check and parse the header,
 * then use the message ID to find the request this belongs to and
 * post it on that request.
 *
 * We also want to apply any credit grant in this reply now,
 * rather than waiting for the owner to wake up.
 */
static int
smb2_iod_process(smb_vc_t *vcp, mblk_t *m)
{
	struct mdchain md;
	struct smb_rq *rqp;
	uint8_t sig[4];
	mblk_t *next_m = NULL;
	uint64_t message_id, async_id;
	uint32_t flags, next_cmd_off, status;
	uint16_t command, credits_granted;
	boolean_t encrypted = B_FALSE;
	int err;

top:
	m = m_pullup(m, SMB2_HDRLEN);
	if (m == NULL)
		return (ENOMEM);

	/*
	 * Note: Intentionally do NOT md_done(&md)
	 * because that would free the message and
	 * we just want to peek here.
	 */
	md_initm(&md, m);

	/*
	 * Check the SMB header. Must be SMB2
	 * (and later, could be SMB3 encrypted)
	 */
	err = md_get_mem(&md, sig, 4, MB_MSYSTEM);
	if (err)
		return (err);
	if (sig[1] != 'S' || sig[2] != 'M' || sig[3] != 'B') {
		goto bad_hdr;
	}
	switch (sig[0]) {
	case SMB_HDR_V2:
		break;
	case SMB_HDR_V3E:
		err = smb3_msg_decrypt(vcp, &m);
		if (err != 0) {
			SMBIODEBUG("SMB3 decrypt failed\n");
			m_freem(m);
			return (ENOMSG);
		}
		encrypted = B_TRUE;
		goto top;

	bad_hdr:
	default:
		SMBIODEBUG("Bad SMB2 hdr\n");
		m_freem(m);
		return (EPROTO);
	}

	/*
	 * Parse the rest of the SMB2 header,
	 * skipping what we don't need.
	 */
	md_get_uint32le(&md, NULL);	/* length, credit_charge */
	md_get_uint32le(&md, &status);
	md_get_uint16le(&md, &command);
	md_get_uint16le(&md, &credits_granted);
	md_get_uint32le(&md, &flags);
	md_get_uint32le(&md, &next_cmd_off);
	md_get_uint64le(&md, &message_id);
	if (flags & SMB2_FLAGS_ASYNC_COMMAND) {
		md_get_uint64le(&md, &async_id);
	} else {
		/* PID, TID (not needed) */
		async_id = 0;
	}

	/*
	 * If this is a compound reply, split it.
	 * Next must be 8-byte aligned.
	 */
	if (next_cmd_off != 0) {
		if ((next_cmd_off & 7) != 0)
			SMBIODEBUG("Misaligned next cmd\n");
		else
			next_m = m_split(m, next_cmd_off, 1);
	}

	/*
	 * SMB2 Negotiate may return zero credits_granted,
	 * in which case we should assume it granted one.
	 */
	if (command == SMB2_NEGOTIATE && credits_granted == 0)
		credits_granted = 1;

	/*
	 * Apply the credit grant
	 */
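	/*
	 * The send window is [vc2_next_message_id, vc2_limit_message_id);
	 * each grant widens it by credits_granted (see smb2_iod_addrq).
	 */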
	rw_enter(&vcp->iod_rqlock, RW_WRITER);
	vcp->vc2_limit_message_id += credits_granted;

	/*
	 * Find the request and post the reply
	 */
	rw_downgrade(&vcp->iod_rqlock);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		if (rqp->sr2_messageid != message_id)
			continue;

		DTRACE_PROBE2(iod_post_reply,
		    (smb_rq_t *), rqp, (mblk_t *), m);
		m_dumpm(m);

		/*
		 * If this is an interim response, just save the
		 * async ID but don't wake up the request.
		 * Don't need SMBRQ_LOCK for this.
		 */
		if (status == NT_STATUS_PENDING && async_id != 0) {
			rqp->sr2_rspasyncid = async_id;
			m_freem(m);
			break;
		}

		SMBRQ_LOCK(rqp);
		if (rqp->sr_rp.md_top == NULL) {
			md_initm(&rqp->sr_rp, m);
		} else {
			SMBRQ_UNLOCK(rqp);
			rqp = NULL;
			break;
		}
		if (encrypted)
			rqp->sr_flags |= SMBR_ENCRYPTED;
		smb_iod_rqprocessed_LH(rqp, 0, 0);
		SMBRQ_UNLOCK(rqp);
		break;
	}
	rw_exit(&vcp->iod_rqlock);

	if (rqp == NULL) {
		if (command != SMB2_ECHO) {
			SMBSDEBUG("drop resp: MID %lld\n",
			    (long long)message_id);
		}
		m_freem(m);
		/*
		 * Keep going. It's possible this reply came
		 * after the request timed out and went away.
		 */
	}

	/*
	 * If we split a compound reply, continue with the
	 * next part of the compound.
	 */
	if (next_m != NULL) {
		m = next_m;
		goto top;
	}

	return (0);
}

/*
 * The IOD receiver thread has requests pending and
 * has not received anything in a while. Try to
 * send an SMB echo request. It's tricky to do a
 * send from the IOD thread because we can't block.
 *
 * Using tmo=SMBNOREPLYWAIT in the request
 * so smb_rq_reply will skip smb_iod_waitrq.
 * The smb_smb_echo call uses SMBR_INTERNAL
 * to avoid calling smb_iod_sendall().
 */
static int
smb_iod_send_echo(smb_vc_t *vcp, cred_t *cr)
{
	smb_cred_t scred;
	int err, tmo = SMBNOREPLYWAIT;

	ASSERT(vcp->iod_thr == curthread);

	smb_credinit(&scred, cr);
	if ((vcp->vc_flags & SMBV_SMB2) != 0) {
		err = smb2_smb_echo(vcp, &scred, tmo);
	} else {
		err = smb_smb_echo(vcp, &scred, tmo);
	}
	smb_credrele(&scred);
	return (err);
}

/*
 * Helper for smb1_iod_addrq, smb2_iod_addrq
 * Returns zero if interrupted, else 1.
 */
static int
smb_iod_muxwait(smb_vc_t *vcp, boolean_t sig_ok)
{
	int rc;

	SMB_VC_LOCK(vcp);
	vcp->iod_muxwant++;
	if (sig_ok) {
		rc = cv_wait_sig(&vcp->iod_muxwait, &vcp->vc_lock);
	} else {
		cv_wait(&vcp->iod_muxwait, &vcp->vc_lock);
		rc = 1;
	}
	vcp->iod_muxwant--;
	SMB_VC_UNLOCK(vcp);

	return (rc);
}

/*
 * Place request in the queue, and send it.
 * Called with no locks held.
 *
 * Called for SMB1 only
 *
 * The logic for how we limit active requests differs between
 * SMB1 and SMB2. With SMB1 it's a simple counter iod_muxcnt.
 */
int
smb1_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	uint16_t need;
	boolean_t sig_ok =
	    (rqp->sr_flags & SMBR_NOINTR_SEND) == 0;

	ASSERT(rqp->sr_cred);
	ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);

	rqp->sr_owner = curthread;

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

recheck:
	/*
	 * Internal requests can be added in any state,
	 * but normal requests only in state active.
	 */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		rw_exit(&vcp->iod_rqlock);
		return (ENOTCONN);
	}

	/*
	 * If we're at the limit of active requests, block until
	 * enough requests complete so we can make ours active.
	 * Wakeup in smb_iod_removerq().
	 *
	 * Normal callers leave one slot free, so internal
	 * callers can have the last slot if needed.
	 */
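	/*
	 * e.g. with vc_maxmux == 50, a normal caller needs two free
	 * slots (need == 2), so one slot always remains for internal
	 * requests and normal callers block at 49 active requests.
	 */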
	need = 1;
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0)
		need++;
	if ((vcp->iod_muxcnt + need) > vcp->vc_maxmux) {
		rw_exit(&vcp->iod_rqlock);
		if (rqp->sr_flags & SMBR_INTERNAL)
			return (EBUSY);
		if (smb_iod_muxwait(vcp, sig_ok) == 0)
			return (EINTR);
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		goto recheck;
	}

	/*
	 * Add this request to the active list and send it.
	 * (Compounding is SMB2-only; see smb2_iod_addrq.)
	 */
	rqp->sr_mid = vcp->vc_next_mid++;
	/* If signing, set the signing sequence numbers. */
	if (vcp->vc_mackey != NULL && (rqp->sr_rqflags2 &
	    SMB_FLAGS2_SECURITY_SIGNATURE) != 0) {
		rqp->sr_seqno = vcp->vc_next_seq++;
		rqp->sr_rseqno = vcp->vc_next_seq++;
	}
	vcp->iod_muxcnt++;
	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
	smb1_iod_sendrq(rqp);

	rw_exit(&vcp->iod_rqlock);
	return (0);
}

/*
 * Place request in the queue, and send it.
 * Called with no locks held.
 *
 * Called for SMB2 only.
 *
 * With SMB2 we have a range of valid message IDs, and we may
 * only send requests when we can assign a message ID within
 * the valid range. We may need to wait here for some active
 * request to finish (and update vc2_limit_message_id) before
 * we can get message IDs for our new request(s). Another
 * difference is that the request sequence we're waiting to
 * add here may require multiple message IDs, due to either
 * compounding or multi-credit requests. Therefore we need
 * to wait for the availability of however many message IDs
 * are required by our request sequence.
 */
int
smb2_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smb_rq *c_rqp;	/* compound req */
	uint16_t charge;
	boolean_t sig_ok =
	    (rqp->sr_flags & SMBR_NOINTR_SEND) == 0;

	ASSERT(rqp->sr_cred != NULL);
	ASSERT((vcp->vc_flags & SMBV_SMB2) != 0);

	/*
	 * Figure out the credit charges
	 * No multi-credit messages yet.
	 */
	rqp->sr2_totalcreditcharge = rqp->sr2_creditcharge;
	c_rqp = rqp->sr2_compound_next;
	while (c_rqp != NULL) {
		rqp->sr2_totalcreditcharge += c_rqp->sr2_creditcharge;
		c_rqp = c_rqp->sr2_compound_next;
	}
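	/*
	 * e.g. a compound of three requests, each with
	 * sr2_creditcharge == 1, yields a total charge of 3 and
	 * will consume three consecutive message IDs below.
	 */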

	/*
	 * Internal request must not be compounded
	 * and should use exactly one credit.
	 */
	if (rqp->sr_flags & SMBR_INTERNAL) {
		if (rqp->sr2_compound_next != NULL) {
			ASSERT(0);
			return (EINVAL);
		}
	}

	rqp->sr_owner = curthread;

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

recheck:
	/*
	 * Internal requests can be added in any state,
	 * but normal requests only in state active.
	 */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		rw_exit(&vcp->iod_rqlock);
		return (ENOTCONN);
	}

	/*
	 * If we're at the limit of active requests, block until
	 * enough requests complete so we can make ours active.
	 * Wakeup in smb_iod_removerq().
	 *
	 * Normal callers leave one slot free, so internal
	 * callers can have the last slot if needed.
	 */
	charge = rqp->sr2_totalcreditcharge;
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0)
		charge++;
	if ((vcp->vc2_next_message_id + charge) >
	    vcp->vc2_limit_message_id) {
		rw_exit(&vcp->iod_rqlock);
		if (rqp->sr_flags & SMBR_INTERNAL)
			return (EBUSY);
		if (smb_iod_muxwait(vcp, sig_ok) == 0)
			return (EINTR);
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		goto recheck;
	}

	/*
	 * Add this request to the active list and send it.
	 * For SMB2 we may have a sequence of compounded
	 * requests, in which case we must add them all.
	 * They're sent as a compound in smb2_iod_sendrq.
	 */

	rqp->sr2_messageid = vcp->vc2_next_message_id;
	vcp->vc2_next_message_id += rqp->sr2_creditcharge;
	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);

	c_rqp = rqp->sr2_compound_next;
	while (c_rqp != NULL) {
		c_rqp->sr2_messageid = vcp->vc2_next_message_id;
		vcp->vc2_next_message_id += c_rqp->sr2_creditcharge;
		TAILQ_INSERT_TAIL(&vcp->iod_rqlist, c_rqp, sr_link);
		c_rqp = c_rqp->sr2_compound_next;
	}
	smb2_iod_sendrq(rqp);

	rw_exit(&vcp->iod_rqlock);
	return (0);
}

/*
 * Mark an SMBR_MULTIPACKET request as
 * needing another send. Similar to the
 * "normal" part of smb1_iod_addrq.
 * Only used by SMB1
 */
int
smb1_iod_multirq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;

	ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);

	if (vcp->vc_flags & SMBV_SMB2) {
		ASSERT("!SMB2?");
		return (EINVAL);
	}

	if (rqp->sr_flags & SMBR_INTERNAL)
		return (EINVAL);

	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	/* Already on iod_rqlist, just reset state. */
	rqp->sr_state = SMBRQ_NOTSENT;
	smb1_iod_sendrq(rqp);

	rw_exit(&vcp->iod_rqlock);

	return (0);
}

/*
 * Remove a request from the active list, and
 * wake up requests waiting to go active.
 *
 * Shared by SMB1 + SMB2
 *
 * The logic for how we limit active requests differs between
 * SMB1 and SMB2. With SMB1 it's a simple counter iod_muxcnt.
 * With SMB2 we have a range of valid message IDs, and when we
 * retire the oldest request we need to keep track of what is
 * now the oldest message ID. In both cases, after we take a
 * request out of the list here, we should be able to wake up
 * a request waiting to get in the active list.
 */
void
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_rq *rqp2;
	struct smb_vc *vcp = rqp->sr_vc;
	boolean_t was_head = B_FALSE;

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

#ifdef QUEUEDEBUG
	/*
	 * Make sure we have not already removed it.
	 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
	 * XXX: Don't like the constant 1 here...
	 */
	ASSERT(rqp->sr_link.tqe_next != (void *)1L);
#endif

	if (TAILQ_FIRST(&vcp->iod_rqlist) == rqp)
		was_head = B_TRUE;
	TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
	if (vcp->vc_flags & SMBV_SMB2) {
		rqp2 = TAILQ_FIRST(&vcp->iod_rqlist);
		if (was_head && rqp2 != NULL) {
			/* Do we still need this? */
			vcp->vc2_oldest_message_id =
			    rqp2->sr2_messageid;
		}
	} else {
		ASSERT(vcp->iod_muxcnt > 0);
		vcp->iod_muxcnt--;
	}

	rw_exit(&vcp->iod_rqlock);

	/*
	 * If there are requests waiting for "mux" slots,
	 * wake one.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->iod_muxwant != 0)
		cv_signal(&vcp->iod_muxwait);
	SMB_VC_UNLOCK(vcp);
}

/*
 * Wait for a request to complete.
 */
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	clock_t tr, tmo1, tmo2;
	int error;

	if (rqp->sr_flags & SMBR_INTERNAL) {
		/* XXX - Do we ever take this path now? */
		return (smb_iod_waitrq_int(rqp));
	}

	/*
	 * Make sure this is NOT the IOD thread,
	 * or the wait below will stop the reader.
	 */
	ASSERT(curthread != vcp->iod_thr);

	SMBRQ_LOCK(rqp);

	/*
	 * The request has been sent. Now wait for the response,
	 * with the timeout specified for this request.
	 * Compute all the deadlines now, so we effectively
	 * start the timer(s) after the request is sent.
	 */
	if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
		tmo1 = SEC_TO_TICK(smb_timo_notice);
	else
		tmo1 = 0;
	tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);
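	/*
	 * Example: with rqp->sr_timo == 30 (sec.) and smb_timo_notice
	 * == 15, the first wait below expires at 15 sec. (firing the
	 * smb_iod_waitrq1 probe) and the second gives up with ETIME
	 * at 30 sec.
	 */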

	/*
	 * As above, we don't want to allow interrupt for some
	 * requests like open, because we could miss a successful
	 * response and therefore "leak" a FID. Such requests
	 * are marked SMBR_NOINTR_RECV to prevent that.
	 *
	 * If "slow server" warnings are enabled, wait first
	 * for the "notice" timeout, and warn if expired.
	 */
	if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_reltimedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
		else
			tr = cv_reltimedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
			DTRACE_PROBE1(smb_iod_waitrq1,
			    (smb_rq_t *), rqp);
		}
	}

	/*
	 * Keep waiting until tmo2 is expired.
	 */
	while (rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
			DTRACE_PROBE1(smb_iod_waitrq2,
			    (smb_rq_t *), rqp);
			error = ETIME;
			goto out;
		}
		/* got wakeup */
	}
	error = rqp->sr_lerror;
	rqp->sr_rplast++;

out:
	SMBRQ_UNLOCK(rqp);

	/*
	 * MULTIPACKET request must stay in the list.
	 * They may need additional responses.
	 */
	if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
		smb_iod_removerq(rqp);

	return (error);
}

/*
 * Internal variant of smb_iod_waitrq(), for use in
 * requests run by the IOD (reader) thread itself.
 * Block only long enough to receive one reply.
 */
int
smb_iod_waitrq_int(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int timeleft = rqp->sr_timo;
	int error;

	ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
again:
	error = smb_iod_recvall(vcp, B_TRUE);
	if (error == ETIME) {
		/* We waited SMB_NBTIMO sec. */
		timeleft -= SMB_NBTIMO;
		if (timeleft > 0)
			goto again;
	}

	smb_iod_removerq(rqp);
	if (rqp->sr_state != SMBRQ_NOTIFIED)
		error = ETIME;

	return (error);
}

/*
 * Shutdown all outstanding I/O requests on the specified share with
 * EIO; used when unmounting a share. (There shouldn't be any for a
 * non-forced unmount; if this is a forced unmount, we have to shutdown
 * the requests as part of the unmount process.)
 */
void
smb_iod_shutdown_share(struct smb_share *ssp)
{
	struct smb_vc *vcp = SSTOVC(ssp);
	struct smb_rq *rqp;

	/*
	 * Loop through the list of requests and shutdown the ones
	 * that are for the specified share.
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
			smb_iod_rqprocessed(rqp, EIO, 0);
	}
	rw_exit(&vcp->iod_rqlock);
}

/*
 * Ioctl functions called by the user-level I/O Daemon (IOD)
 * to bring up and service a connection to some SMB server.
 */

/*
 * Handle ioctl SMBIOC_IOD_CONNECT
 */
int
nsmb_iod_connect(struct smb_vc *vcp, cred_t *cr)
{
	int err, val;

	ASSERT(vcp->iod_thr == curthread);

	if (vcp->vc_state != SMBIOD_ST_RECONNECT) {
		cmn_err(CE_NOTE, "iod_connect: bad state %d", vcp->vc_state);
		return (EINVAL);
	}

	/*
	 * Putting a TLI endpoint back in the right state for a new
	 * connection is a bit tricky. In theory, this could be:
	 *	SMB_TRAN_DISCONNECT(vcp);
	 *	SMB_TRAN_UNBIND(vcp);
	 * but that method often results in TOUTSTATE errors.
	 * It's easier to just close it and open a new endpoint.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->vc_tdata)
		SMB_TRAN_DONE(vcp);
	err = SMB_TRAN_CREATE(vcp, cr);
	SMB_VC_UNLOCK(vcp);
	if (err != 0)
		return (err);

	/*
	 * Set various options on this endpoint.
	 * Keep going in spite of errors.
	 */
	val = smb_tcpsndbuf;
	err = SMB_TRAN_SETPARAM(vcp, SMBTP_SNDBUF, &val);
	if (err != 0) {
		cmn_err(CE_NOTE, "iod_connect: setopt SNDBUF, err=%d", err);
	}
	val = smb_tcprcvbuf;
	err = SMB_TRAN_SETPARAM(vcp, SMBTP_RCVBUF, &val);
	if (err != 0) {
		cmn_err(CE_NOTE, "iod_connect: setopt RCVBUF, err=%d", err);
	}
	val = 1;
	err = SMB_TRAN_SETPARAM(vcp, SMBTP_KEEPALIVE, &val);
	if (err != 0) {
		cmn_err(CE_NOTE, "iod_connect: setopt KEEPALIVE, err=%d", err);
	}
	val = 1;
	err = SMB_TRAN_SETPARAM(vcp, SMBTP_TCP_NODELAY, &val);
	if (err != 0) {
		cmn_err(CE_NOTE, "iod_connect: setopt TCP_NODELAY err=%d", err);
	}
	val = smb_connect_timeout * 1000;
	err = SMB_TRAN_SETPARAM(vcp, SMBTP_TCP_CON_TMO, &val);
	if (err != 0) {
		cmn_err(CE_NOTE, "iod_connect: setopt TCP con tmo err=%d", err);
	}

	/*
	 * Bind and connect
	 */
	err = SMB_TRAN_BIND(vcp, NULL);
	if (err != 0) {
		cmn_err(CE_NOTE, "iod_connect: t_kbind: err=%d", err);
		/* Continue on and try connect. */
	}
	err = SMB_TRAN_CONNECT(vcp, &vcp->vc_srvaddr.sa);
	/*
	 * No cmn_err here, as connect failures are normal, i.e.
	 * when a server has multiple addresses and only some are
	 * routed for us. (libsmbfs tries them all)
	 */
	if (err == 0) {
		SMB_VC_LOCK(vcp);
		smb_iod_newstate(vcp, SMBIOD_ST_CONNECTED);
		SMB_VC_UNLOCK(vcp);
	} /* else stay in state reconnect */

	return (err);
}

/*
 * Handle ioctl SMBIOC_IOD_NEGOTIATE
 * Do the whole SMB1/SMB2 negotiate
 *
 * This is where we send our first request to the server.
 * If this is the first time we're talking to this server,
 * (meaning not a reconnect) then we don't know whether
 * the server supports SMB2, so we need to use the weird
 * SMB1-to-SMB2 negotiation. That's where we send an SMB1
 * negotiate including dialect "SMB 2.???" and if the
 * server supports SMB2 we get an SMB2 reply -- Yes, an
 * SMB2 reply to an SMB1 request. A strange protocol...
 *
 * If on the other hand we already know the server supports
 * SMB2 (because this is a reconnect) or if the client side
 * has disabled SMB1 entirely, we'll skip the SMB1 part.
 */
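/*
 * In short, the dialect selection below is:
 *	vc_minver < SMB2_DIALECT_BASE	try SMB1 negotiate first
 *	sv_proto == SMB_DIALECT_SMB2_FF	server chose SMB2; switch over
 *	vc_minver >= SMB2_DIALECT_BASE	SMB1 disabled; SMB2 only
 */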
int
nsmb_iod_negotiate(struct smb_vc *vcp, cred_t *cr)
{
	struct smb_sopt *sv = &vcp->vc_sopt;
	smb_cred_t scred;
	int err = 0;

	ASSERT(vcp->iod_thr == curthread);

	smb_credinit(&scred, cr);

	if (vcp->vc_state != SMBIOD_ST_CONNECTED) {
		cmn_err(CE_NOTE, "iod_negotiate: bad state %d", vcp->vc_state);
		err = EINVAL;
		goto out;
	}

	if (vcp->vc_maxver == 0 || vcp->vc_minver > vcp->vc_maxver) {
		err = EINVAL;
		goto out;
	}

	/*
	 * (Re)init negotiated values
	 */
	bzero(sv, sizeof (*sv));
	vcp->vc2_next_message_id = 0;
	vcp->vc2_limit_message_id = 1;
	vcp->vc2_session_id = 0;
	vcp->vc_next_seq = 0;

	/*
	 * If this was reconnect, get rid of the old MAC key
	 * and session key.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->vc_mackey != NULL) {
		kmem_free(vcp->vc_mackey, vcp->vc_mackeylen);
		vcp->vc_mackey = NULL;
		vcp->vc_mackeylen = 0;
	}
	if (vcp->vc_ssnkey != NULL) {
		kmem_free(vcp->vc_ssnkey, vcp->vc_ssnkeylen);
		vcp->vc_ssnkey = NULL;
		vcp->vc_ssnkeylen = 0;
	}
	SMB_VC_UNLOCK(vcp);

	/*
	 * If this is not an SMB2 reconnect (SMBV_SMB2 not set),
	 * and if SMB1 is enabled, do SMB1 negotiate. Then
	 * if either SMB1-to-SMB2 negotiate tells us we should
	 * switch to SMB2, or the local configuration has
	 * disabled SMB1, set the SMBV_SMB2 flag.
	 *
	 * Note that vc_maxver is handled in smb_smb_negotiate
	 * so we never get sv_proto == SMB_DIALECT_SMB2_FF when
	 * the local configuration disables SMB2, and therefore
	 * we won't set the SMBV_SMB2 flag.
	 */
	if ((vcp->vc_flags & SMBV_SMB2) == 0) {
		if (vcp->vc_minver < SMB2_DIALECT_BASE) {
			/*
			 * SMB1 is enabled
			 */
			err = smb_smb_negotiate(vcp, &scred);
			if (err != 0)
				goto out;
		}
		/*
		 * If SMB1-to-SMB2 negotiate told us we should
		 * switch to SMB2, or if the local configuration
		 * disables SMB1, set the SMB2 flag.
		 */
		if (sv->sv_proto == SMB_DIALECT_SMB2_FF ||
		    vcp->vc_minver >= SMB2_DIALECT_BASE) {
			/*
			 * Switch this VC to SMB2.
			 */
			SMB_VC_LOCK(vcp);
			vcp->vc_flags |= SMBV_SMB2;
			SMB_VC_UNLOCK(vcp);
		}
	}

	/*
	 * If this is an SMB2 reconnect (SMBV_SMB2 was set before this
	 * function was called), or SMB1-to-SMB2 negotiate indicated
	 * we should switch to SMB2, or we have SMB1 disabled (both
	 * cases set SMBV_SMB2 above), then do SMB2 negotiate.
	 */
	if ((vcp->vc_flags & SMBV_SMB2) != 0) {
		err = smb2_smb_negotiate(vcp, &scred);
	}

out:
	if (err == 0) {
		SMB_VC_LOCK(vcp);
		smb_iod_newstate(vcp, SMBIOD_ST_NEGOTIATED);
		SMB_VC_UNLOCK(vcp);
	}
	/*
	 * (else) leave state as it was.
	 * User-level will either close this handle (if connecting
	 * for the first time) or call rcfail and then try again.
	 */

	smb_credrele(&scred);

	return (err);
}

/*
 * Handle ioctl SMBIOC_IOD_SSNSETUP
 * Do either SMB1 or SMB2 session setup (one call/reply)
 */
int
nsmb_iod_ssnsetup(struct smb_vc *vcp, cred_t *cr)
{
	smb_cred_t scred;
	int err;

	ASSERT(vcp->iod_thr == curthread);

	switch (vcp->vc_state) {
	case SMBIOD_ST_NEGOTIATED:
	case SMBIOD_ST_AUTHCONT:
		break;
	default:
		return (EINVAL);
	}

	smb_credinit(&scred, cr);
	if (vcp->vc_flags & SMBV_SMB2)
		err = smb2_smb_ssnsetup(vcp, &scred);
	else
		err = smb_smb_ssnsetup(vcp, &scred);
	smb_credrele(&scred);

	SMB_VC_LOCK(vcp);
	switch (err) {
	case 0:
		smb_iod_newstate(vcp, SMBIOD_ST_AUTHOK);
		break;
	case EINPROGRESS: /* MORE_PROCESSING_REQUIRED */
		smb_iod_newstate(vcp, SMBIOD_ST_AUTHCONT);
		break;
	default:
		smb_iod_newstate(vcp, SMBIOD_ST_AUTHFAIL);
		break;
	}
	SMB_VC_UNLOCK(vcp);

	return (err);
}

static int
smb_iod_logoff(struct smb_vc *vcp, cred_t *cr)
{
	smb_cred_t scred;
	int err;

	ASSERT(vcp->iod_thr == curthread);

	smb_credinit(&scred, cr);
	if (vcp->vc_flags & SMBV_SMB2)
		err = smb2_smb_logoff(vcp, &scred);
	else
		err = smb_smb_logoff(vcp, &scred);
	smb_credrele(&scred);

	return (err);
}

/*
 * Handle ioctl SMBIOC_IOD_WORK
 *
 * The smbiod agent calls this after authentication to become
 * the reader for this session, so long as that's possible.
 * This should only return non-zero if we want that agent to
 * give up on this VC permanently.
 */
/* ARGSUSED */
int
smb_iod_vc_work(struct smb_vc *vcp, int flags, cred_t *cr)
{
	smbioc_ssn_work_t *wk = &vcp->vc_work;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	/*
	 * Should be in state...
	 */
	if (vcp->vc_state != SMBIOD_ST_AUTHOK) {
		cmn_err(CE_NOTE, "iod_vc_work: bad state %d", vcp->vc_state);
		return (EINVAL);
	}

	/*
	 * Update the session key and initialize SMB signing.
	 *
	 * This implementation does not use multiple SMB sessions per
	 * TCP connection (where only the first session key is used)
	 * so we always have a new session key here. Sanity check the
	 * length from user space. Normally 16 or 32.
	 */
	if (wk->wk_u_ssnkey_len > 1024) {
		cmn_err(CE_NOTE, "iod_vc_work: ssn key too long");
		return (EINVAL);
	}

	ASSERT(vcp->vc_ssnkey == NULL);
	SMB_VC_LOCK(vcp);
	if (wk->wk_u_ssnkey_len != 0 &&
	    wk->wk_u_ssnkey_buf.lp_ptr != NULL) {
		vcp->vc_ssnkeylen = wk->wk_u_ssnkey_len;
		vcp->vc_ssnkey = kmem_alloc(vcp->vc_ssnkeylen, KM_SLEEP);
		if (ddi_copyin(wk->wk_u_ssnkey_buf.lp_ptr,
		    vcp->vc_ssnkey, vcp->vc_ssnkeylen, flags) != 0) {
			err = EFAULT;
		}
	}
	SMB_VC_UNLOCK(vcp);
	if (err)
		return (err);

	/*
	 * If we have a session key, derive the MAC key for SMB signing.
	 * If this was a NULL session, we might have no session key.
	 */
	ASSERT(vcp->vc_mackey == NULL);
	if (vcp->vc_ssnkey != NULL) {
		if (vcp->vc_flags & SMBV_SMB2)
			err = smb2_sign_init(vcp);
		else
			err = smb_sign_init(vcp);
		if (err != 0)
			return (err);
		if (SMB_DIALECT(vcp) >= SMB2_DIALECT_0300)
			nsmb_crypt_init_keys(vcp);
	}

	/*
	 * Tell any enqueued requests they can start.
	 */
	SMB_VC_LOCK(vcp);
	vcp->vc_genid++;	/* possibly new connection */
	smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
	cv_broadcast(&vcp->vc_statechg);
	SMB_VC_UNLOCK(vcp);

	/*
	 * The above cv_broadcast should be sufficient to
	 * get requests going again.
	 *
	 * If we have a callback function, run it.
	 * Was: smb_iod_notify_connected()
	 */
	if (fscb && fscb->fscb_connect)
		smb_vc_walkshares(vcp, fscb->fscb_connect);

	/*
	 * Run the "reader" loop. An error return here is normal
	 * (i.e. when we need to reconnect) so ignore errors.
	 * Note: This call updates the vc_state.
	 */
	(void) smb_iod_recvall(vcp, B_FALSE);

	/*
	 * The reader loop returned, so we must have a
	 * new state. (disconnected or reconnecting)
	 *
	 * Notify shares of the disconnect.
	 * Was: smb_iod_notify_disconnect()
	 */
	smb_vc_walkshares(vcp, smb_iod_share_disconnected);

	/*
	 * The reader loop function returns only when
	 * there's been an error on the connection, or
	 * this VC has no more references. It also
	 * updates the state before it returns.
	 *
	 * Tell any requests to give up or restart.
	 */
	smb_iod_invrq(vcp);

	return (err);
}

/*
 * Handle ioctl SMBIOC_IOD_IDLE
 *
 * Wait around for someone to ask to use this VC again after the
 * TCP session has closed. When one of the connected trees adds a
 * request, smb_iod_reconnect will set vc_state to RECONNECT and
 * wake this cv_wait. When a VC ref. goes away in smb_vc_rele,
 * that also signals this wait so we can re-check whether we
 * now hold the last ref. on this VC (and can destroy it).
 */
int
smb_iod_vc_idle(struct smb_vc *vcp)
{
	int err = 0;
	boolean_t destroy = B_FALSE;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	/*
	 * Should be in state...
	 */
	if (vcp->vc_state != SMBIOD_ST_IDLE &&
	    vcp->vc_state != SMBIOD_ST_RECONNECT) {
		cmn_err(CE_NOTE, "iod_vc_idle: bad state %d", vcp->vc_state);
		return (EINVAL);
	}

	SMB_VC_LOCK(vcp);

	while (vcp->vc_state == SMBIOD_ST_IDLE &&
	    vcp->vc_co.co_usecount > 1) {
		if (cv_wait_sig(&vcp->iod_idle, &vcp->vc_lock) == 0) {
			err = EINTR;
			break;
		}
	}
	if (vcp->vc_state == SMBIOD_ST_IDLE &&
	    vcp->vc_co.co_usecount == 1) {
		/*
		 * We were woken because we now have the last ref.
		 * Arrange for this VC to be destroyed now.
		 * Set the "GONE" flag while holding the lock,
		 * to prevent a race with new references.
		 * The destroy happens after unlock.
		 */
		vcp->vc_flags |= SMBV_GONE;
		destroy = B_TRUE;
	}

	SMB_VC_UNLOCK(vcp);

	if (destroy) {
		/* This sets vc_state = DEAD */
		smb_iod_disconnect(vcp);
	}

	return (err);
}

/*
 * Handle ioctl SMBIOC_IOD_RCFAIL
 *
 * After a failed reconnect attempt, smbiod will
 * call this to make current requests error out.
 */
int
smb_iod_vc_rcfail(struct smb_vc *vcp)
{
	clock_t tr;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);
	SMB_VC_LOCK(vcp);

	smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
	cv_broadcast(&vcp->vc_statechg);

	/*
	 * Short wait here for two reasons:
	 * (1) Give requests a chance to error out.
	 * (2) Prevent immediate retry.
	 */
	tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
	    SEC_TO_TICK(5), TR_CLOCK_TICK);
	if (tr == 0)
		err = EINTR;

	/*
	 * Normally we'll switch to state IDLE here. However,
	 * if something called smb_iod_reconnect() while we were
	 * waiting above, we'll be in state reconnect already.
	 * In that case, keep state RECONNECT, so we essentially
	 * skip transition through state IDLE that would normally
	 * happen next.
	 */
	if (vcp->vc_state != SMBIOD_ST_RECONNECT) {
		smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
		cv_broadcast(&vcp->vc_statechg);
	}

	SMB_VC_UNLOCK(vcp);

	return (err);
}

/*
 * Ask the IOD to reconnect (if not already underway)
 * then wait for the reconnect to finish.
 */
int
smb_iod_reconnect(struct smb_vc *vcp)
{
	int err = 0, rv;

	SMB_VC_LOCK(vcp);
again:
	switch (vcp->vc_state) {

	case SMBIOD_ST_IDLE:
		/* Tell the IOD thread it's no longer IDLE. */
		smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
		cv_signal(&vcp->iod_idle);
		/* FALLTHROUGH */

	case SMBIOD_ST_RECONNECT:
	case SMBIOD_ST_CONNECTED:
	case SMBIOD_ST_NEGOTIATED:
	case SMBIOD_ST_AUTHCONT:
	case SMBIOD_ST_AUTHOK:
		/* Wait for the VC state to become ACTIVE. */
		rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock);
		if (rv == 0) {
			err = EINTR;
			break;
		}
		goto again;

	case SMBIOD_ST_VCACTIVE:
		err = 0; /* success! */
		break;

	case SMBIOD_ST_AUTHFAIL:
	case SMBIOD_ST_RCFAILED:
	case SMBIOD_ST_DEAD:
	default:
		err = ENOTCONN;
		break;
	}

	SMB_VC_UNLOCK(vcp);
	return (err);
}