xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c (revision bdb9230ac765cb7af3fc1f4119caf2c5720dceb3)
1 /*
2  * Copyright (c) 2000-2001 Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
33  */
34 
35 /*
36  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
37  * Use is subject to license terms.
38  */
39 
40 #ifdef DEBUG
41 /* See sys/queue.h */
42 #define	QUEUEDEBUG 1
43 #endif
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/atomic.h>
48 #include <sys/proc.h>
49 #include <sys/thread.h>
50 #include <sys/kmem.h>
51 #include <sys/unistd.h>
52 #include <sys/mount.h>
53 #include <sys/vnode.h>
54 #include <sys/types.h>
55 #include <sys/ddi.h>
56 #include <sys/sunddi.h>
57 #include <sys/stream.h>
58 #include <sys/strsun.h>
59 #include <sys/time.h>
60 #include <sys/class.h>
61 #include <sys/disp.h>
62 #include <sys/cmn_err.h>
63 #include <sys/zone.h>
64 #include <sys/sdt.h>
65 
66 #ifdef APPLE
67 #include <sys/smb_apple.h>
68 #else
69 #include <netsmb/smb_osdep.h>
70 #endif
71 
72 #include <netsmb/smb.h>
73 #include <netsmb/smb_conn.h>
74 #include <netsmb/smb_rq.h>
75 #include <netsmb/smb_subr.h>
76 #include <netsmb/smb_tran.h>
77 #include <netsmb/smb_trantcp.h>
78 
79 #ifdef NEED_SMBFS_CALLBACKS
80 /*
81  * This is set/cleared when smbfs loads/unloads
82  * No locks should be necessary, because smbfs
83  * can't unload until all the mounts are gone.
84  */
85 static smb_fscb_t *fscb;
/*
 * Set (or clear, when cb == NULL) the smbfs callback vector.
 * Called by smbfs at module load/unload time.  Always returns zero.
 */
int
smb_fscb_set(smb_fscb_t *cb)
{
	fscb = cb;
	return (0);
}
92 #endif /* NEED_SMBFS_CALLBACKS */
93 
94 static void smb_iod_sendall(struct smb_vc *);
95 static void smb_iod_recvall(struct smb_vc *);
96 static void smb_iod_main(struct smb_vc *);
97 
98 
99 #define	SMBIOD_SLEEP_TIMO	2
100 #define	SMBIOD_PING_TIMO	60	/* seconds */
101 
102 /*
103  * After this many seconds we want an unresponded-to request to trigger
104  * some sort of UE (dialogue).  If the connection hasn't responded at all
105  * in this many seconds then the dialogue is of the "connection isn't
106  * responding would you like to force unmount" variety.  If the connection
107  * has been responding (to other requests that is) then we need a dialogue
108  * of the "operation is still pending do you want to cancel it" variety.
109  * At present this latter dialogue does not exist so we have no UE and
110  * just keep waiting for the slow operation.
111  */
112 #define	SMBUETIMEOUT 8 /* seconds */
113 
114 
115 /* Lock Held version of the next function. */
116 static inline void
117 smb_iod_rqprocessed_LH(
118 	struct smb_rq *rqp,
119 	int error,
120 	int flags)
121 {
122 	rqp->sr_flags |= flags;
123 	rqp->sr_lerror = error;
124 	rqp->sr_rpgen++;
125 	rqp->sr_state = SMBRQ_NOTIFIED;
126 	cv_broadcast(&rqp->sr_cond);
127 }
128 
/*
 * Mark a request as processed and wake its waiters.
 * Takes and drops the request mutex around the
 * lock-held worker above.
 */
static void
smb_iod_rqprocessed(struct smb_rq *rqp, int error, int flags)
{
	SMBRQ_LOCK(rqp);
	smb_iod_rqprocessed_LH(rqp, error, flags);
	SMBRQ_UNLOCK(rqp);
}
140 
141 static void
142 smb_iod_invrq(struct smb_vc *vcp)
143 {
144 	struct smb_rq *rqp;
145 
146 	/*
147 	 * Invalidate all outstanding requests for this connection
148 	 */
149 	rw_enter(&vcp->iod_rqlock, RW_READER);
150 	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
151 		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
152 	}
153 	rw_exit(&vcp->iod_rqlock);
154 }
155 
#ifdef SMBTP_UPCALL
/*
 * Transport (socket) upcall handler.  Currently a no-op.
 * Runs in socket upcall context, so it must not block.
 */
static void
smb_iod_sockwakeup(struct smb_vc *vcp)
{
	/* note: called from socket upcall... */
}
#endif
163 
/*
 * Called after we fail to send or recv.
 * Called with no locks held.
 *
 * Marks the VC dead, notifies smbfs (if loaded) that every
 * share on this VC is gone, then fails all outstanding
 * requests so their waiters wake up.
 */
static void
smb_iod_dead(struct smb_vc *vcp)
{

	SMB_VC_LOCK(vcp);
	vcp->vc_state = SMBIOD_ST_DEAD;
	cv_broadcast(&vcp->vc_statechg);

#ifdef NEED_SMBFS_CALLBACKS
	if (fscb != NULL) {
		struct smb_connobj *co;
		/*
		 * Walk the share list, notify...
		 * Was: smbfs_dead(...share->ss_mount);
		 * XXX: Ok to hold vc_lock here?
		 * XXX: More to do here?
		 */
		SLIST_FOREACH(co, &(VCTOCP(vcp)->co_children), co_next) {
			/* smbfs_dead() */
			fscb->fscb_dead(CPTOSS(co));
		}
	}
#endif /* NEED_SMBFS_CALLBACKS */

	SMB_VC_UNLOCK(vcp);

	/* Wake everyone blocked on an outstanding request. */
	smb_iod_invrq(vcp);
}
196 
197 int
198 smb_iod_connect(struct smb_vc *vcp)
199 {
200 	struct proc *p = curproc;
201 	int error;
202 
203 	if (vcp->vc_state != SMBIOD_ST_RECONNECT)
204 		return (EINVAL);
205 
206 	if (vcp->vc_laddr) {
207 		error = SMB_TRAN_BIND(vcp, vcp->vc_laddr, p);
208 		if (error)
209 			goto errout;
210 	}
211 
212 #ifdef SMBTP_SELECTID
213 	SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, vcp);
214 #endif
215 #ifdef SMBTP_UPCALL
216 	SMB_TRAN_SETPARAM(vcp, SMBTP_UPCALL, (void *)smb_iod_sockwakeup);
217 #endif
218 
219 	error = SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, p);
220 	if (error) {
221 		SMBIODEBUG("connection to %s error %d\n",
222 		    vcp->vc_srvname, error);
223 		goto errout;
224 	}
225 
226 	/* Success! */
227 	return (0);
228 
229 errout:
230 
231 	return (error);
232 }
233 
/*
 * Called by smb_vc_rele, smb_vc_kill
 * Make the connection go away, and
 * the IOD (reader) thread too!
 *
 * Always returns zero; disconnect errors are ignored since
 * the transport close in smb_vc_free cleans up regardless.
 */
int
smb_iod_disconnect(struct smb_vc *vcp)
{

	/*
	 * Let's be safe here and avoid doing any
	 * call across the network while trying to
	 * shut things down.  If we just disconnect,
	 * the server will take care of the logoff.
	 */
#if 0
	if (vcp->vc_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &vcp->vc_scred);
		vcp->vc_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
#endif

	/*
	 * Used to call smb_iod_closetran here,
	 * which did both disconnect and close.
	 * We now do the close in smb_vc_free,
	 * so we always have a valid vc_tdata.
	 * Now just send the disconnect here.
	 * Extra disconnect calls are ignored.
	 */
	SMB_TRAN_DISCONNECT(vcp, curproc);

	/*
	 * If we have an IOD, let it handle the
	 * state change when it receives the ACK
	 * from the disconnect we just sent.
	 * Otherwise set the state here, i.e.
	 * after failing session setup.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		vcp->vc_state = SMBIOD_ST_DEAD;
		cv_broadcast(&vcp->vc_statechg);
	}
	SMB_VC_UNLOCK(vcp);

	return (0);
}
283 
/*
 * Send one request.
 *
 * Called by _addrq (for internal requests)
 * and _sendall (via _addrq, _multirq, _waitrq)
 *
 * Returns zero when the request was sent or was marked
 * processed (failed), and ENOTCONN on fatal conditions
 * where the caller should stop sending entirely.
 * Caller must hold vc_sendlock and iod_rqlock (reader).
 */
static int
smb_iod_sendrq(struct smb_rq *rqp)
{
	struct proc *p = curproc;
	struct smb_vc *vcp = rqp->sr_vc;
	struct smb_share *ssp = rqp->sr_share;
	mblk_t *m;
	int error;

	ASSERT(vcp);
	ASSERT(SEMA_HELD(&vcp->vc_sendlock));
	ASSERT(RW_READ_HELD(&vcp->iod_rqlock));

	/*
	 * Note: requests with sr_flags & SMBR_INTERNAL
	 * need to pass here with these states:
	 *   SMBIOD_ST_TRANACTIVE: smb_negotiate
	 *   SMBIOD_ST_NEGOACTIVE: smb_ssnsetup
	 */
	SMBIODEBUG("vc_state = %d\n", vcp->vc_state);
	switch (vcp->vc_state) {
	case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN, 0);
		return (0);
	case SMBIOD_ST_DEAD:
		/* This is what keeps the iod itself from sending more */
		smb_iod_rqprocessed(rqp, ENOTCONN, 0);
		return (0);
	case SMBIOD_ST_RECONNECT:
		/* Can't send now; leave the request queued. */
		return (0);
	default:
		break;
	}

	if (rqp->sr_sendcnt == 0) {
		/* First send attempt: fill in UID/TID, then sign. */
		*rqp->sr_rquid = htoles(vcp->vc_smbuid);

		/*
		 * XXX: Odd place for all this...
		 * Would expect these values in vc_smbuid
		 * and/or the request before we get here.
		 * I think most of this mess is due to having
		 * the initial UID set to SMB_UID_UKNOWN when
		 * it should have been initialized to zero!
		 * REVIST this later. XXX -gwr
		 *
		 * This is checking for the case where
		 * "vc_smbuid" was set to 0 in "smb_smb_ssnsetup()";
		 * that happens for requests that occur
		 * after that's done but before we get back the final
		 * session setup reply, where the latter is what
		 * gives us the UID.  (There can be an arbitrary # of
		 * session setup packet exchanges to complete
		 * "extended security" authentication.)
		 *
		 * However, if the server gave us a UID of 0 in a
		 * Session Setup andX reply, and we then do a
		 * Tree Connect andX and get back a TID, we should
		 * use that TID, not 0, in subsequent references to
		 * that tree (e.g., in NetShareEnum RAP requests).
		 *
		 * So, for now, we forcibly zero out the TID only if we're
		 * doing extended security, as that's the only time
		 * that "vc_smbuid" should be explicitly zeroed.
		 *
		 * note we must and do use SMB_TID_UNKNOWN for SMB_COM_ECHO
		 */
		if (!vcp->vc_smbuid &&
		    (vcp->vc_hflags2 & SMB_FLAGS2_EXT_SEC))
			*rqp->sr_rqtid = htoles(0);
		else
			*rqp->sr_rqtid =
			    htoles(ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
		mb_fixhdr(&rqp->sr_rq);

		/*
		 * Sign the message now that we're finally done
		 * filling in the SMB header fields, etc.
		 */
		if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
			smb_rq_sign(rqp);
		}
	}
	if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */
		smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return (ENOTCONN);
	}

	/*
	 * Replaced m_copym() with Solaris copymsg() which does the same
	 * work when we want to do a M_COPYALL.
	 * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
	 */
	m = copymsg(rqp->sr_rq.mb_top);

#ifdef DTRACE_PROBE
	DTRACE_PROBE2(smb_iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), m);
#else
	SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
#endif
	m_dumpm(m);

	/* The transport consumes the message, even on error. */
	error = rqp->sr_lerror = m ? SMB_TRAN_SEND(vcp, m, p) : ENOBUFS;
	m = 0; /* consumed by SEND */
	if (error == 0) {
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		/* Wake any thread blocked in the sendwait loop. */
		if (rqp->sr_flags & SMBR_SENDWAIT)
			cv_broadcast(&rqp->sr_cond);
		SMBRQ_UNLOCK(rqp);
		return (0);
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		return (ENOTCONN);
	}
	if (error)
		SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error);

#ifdef APPLE
	/* If proc waiting on rqp was signaled... */
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR, 0);
#endif

	return (0);
}
429 
430 static int
431 smb_iod_recv1(struct smb_vc *vcp, mblk_t **mpp)
432 {
433 	struct proc *p = curproc;
434 	mblk_t *m;
435 	uchar_t *hp;
436 	int error;
437 
438 top:
439 	m = NULL;
440 	error = SMB_TRAN_RECV(vcp, &m, p);
441 	if (error == EAGAIN)
442 		goto top;
443 	if (error)
444 		return (error);
445 	ASSERT(m);
446 
447 	m = m_pullup(m, SMB_HDRLEN);
448 	if (m == NULL) {
449 		return (ENOSR);
450 	}
451 
452 	/*
453 	 * Check the SMB header
454 	 */
455 	hp = mtod(m, uchar_t *);
456 	if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
457 		m_freem(m);
458 		return (EPROTO);
459 	}
460 
461 	*mpp = m;
462 	return (0);
463 }
464 
/*
 * Process incoming packets
 *
 * This is the "reader" loop, run by the IOD thread
 * while in state SMBIOD_ST_VCACTIVE.  The loop now
 * simply blocks in the socket recv until either a
 * message arrives, or a disconnect.
 *
 * Also handles "server not responding" notices (on recv
 * timeout with requests waiting) and idle-VC teardown
 * (on recv timeout with no other references to the VC).
 */
static void
smb_iod_recvall(struct smb_vc *vcp)
{
	struct smb_rq *rqp;
	mblk_t *m;
	uchar_t *hp;
	ushort_t mid;
	int error;
	int etime_count = 0; /* for "server not responding", etc. */

	for (;;) {

		/* Leave the loop if the VC is no longer active. */
		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
			error = EIO;
			break;
		}

		/* Leave the loop if asked to shut down. */
		if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
			SMBIODEBUG("SHUTDOWN set\n");
			error = EIO;
			break;
		}

		m = NULL;
		error = smb_iod_recv1(vcp, &m);

		if ((error == ETIME) && vcp->iod_rqwaiting) {
			/*
			 * Nothing received for 15 seconds,
			 * and we have requests waiting.
			 */
			etime_count++;

			/*
			 * Once, at 15 sec. notify callbacks
			 * and print the warning message.
			 */
			if (etime_count == 1) {
				smb_iod_notify_down(vcp);
				zprintf(vcp->vc_zoneid,
				    "SMB server %s not responding\n",
				    vcp->vc_srvname);
			}

			/*
			 * At 30 sec. try sending an echo, and then
			 * once a minute thereafter. It's tricky to
			 * do a send from the IOD thread because
			 * we don't want to block here.
			 *
			 * Using tmo=SMBNOREPLYWAIT in the request
			 * so smb_rq_reply will skip smb_iod_waitrq.
			 * The smb_smb_echo call uses SMBR_INTERNAL
			 * to avoid calling smb_iod_sendall().
			 */
			if ((etime_count & 3) == 2) {
				smb_smb_echo(vcp, &vcp->vc_scred,
				    SMBNOREPLYWAIT);
			}

			continue;
		} /* ETIME && iod_rqwaiting */

		if (error == ETIME) {
			/*
			 * If the IOD thread holds the last reference
			 * to this VC, disconnect, release, terminate.
			 * Usually can avoid the lock/unlock here.
			 * Note, in-line: _vc_kill ... _vc_gone
			 */
			if (vcp->vc_co.co_usecount > 1)
				continue;
			SMB_VC_LOCK(vcp);
			if (vcp->vc_co.co_usecount == 1 &&
			    (vcp->vc_flags & SMBV_GONE) == 0) {
				vcp->vc_flags |= SMBV_GONE;
				SMB_VC_UNLOCK(vcp);
				smb_iod_disconnect(vcp);
				break;
			}
			SMB_VC_UNLOCK(vcp);
			continue;
		} /* error == ETIME */

		if (error) {
			/*
			 * It's dangerous to continue here.
			 * (possible infinite loop!)
			 */
			break;
		}

		/*
		 * Received something.  Yea!
		 */
		if (etime_count) {
			etime_count = 0;

			zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
			    vcp->vc_srvname);

			smb_iod_notify_up(vcp);
		}

		/*
		 * Have an SMB packet.  The SMB header was
		 * checked in smb_iod_recv1().
		 * Find the request...
		 */
		hp = mtod(m, uchar_t *);
		/*LINTED*/
		mid = SMB_HDRMID(hp);
		SMBIODEBUG("mid %04x\n", (uint_t)mid);

		/* Match the response MID against the request list. */
		rw_enter(&vcp->iod_rqlock, RW_READER);
		TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

			if (rqp->sr_mid != mid)
				continue;

			DTRACE_PROBE2(smb_iod_recvrq,
			    (smb_rq_t *), rqp, (mblk_t *), m);
			m_dumpm(m);

			SMBRQ_LOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_UNLOCK(rqp);
					SMBSDEBUG("duplicate response %d "
					    "(ignored)\n", mid);
					break;
				}
			}
			smb_iod_rqprocessed_LH(rqp, 0, 0);
			SMBRQ_UNLOCK(rqp);
			break;
		}

		/* No matching request: drop the message. */
		if (rqp == NULL) {
			int cmd = SMB_HDRCMD(hp);

			if (cmd != SMB_COM_ECHO)
				SMBSDEBUG("drop resp: mid %d, cmd %d\n",
				    (uint_t)mid, cmd);
/*			smb_printrqlist(vcp); */
			m_freem(m);
		}
		rw_exit(&vcp->iod_rqlock);

	}
#ifdef APPLE
	/*
	 * check for interrupts
	 * On Solaris, handle in smb_iod_waitrq
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		if (smb_sigintr(rqp->sr_cred->scr_vfsctx))
			smb_iod_rqprocessed(rqp, EINTR, 0);
	}
	rw_exit(&vcp->iod_rqlock);
#endif
}
641 
/*
 * Looks like we don't need these callbacks,
 * but keep the code for now (for Apple).
 *
 * Notify smbfs (when loaded) that this VC's server has
 * stopped responding, for each share on the VC.  A no-op
 * unless built with NEED_SMBFS_CALLBACKS.
 */
/*ARGSUSED*/
void
smb_iod_notify_down(struct smb_vc *vcp)
{
#ifdef NEED_SMBFS_CALLBACKS
	struct smb_connobj *co;

	if (fscb == NULL)
		return;

	/*
	 * Walk the share list, notify...
	 * Was: smbfs_down(...share->ss_mount);
	 * XXX: Ok to hold vc_lock here?
	 */
	SMB_VC_LOCK(vcp);
	SLIST_FOREACH(co, &(VCTOCP(vcp)->co_children), co_next) {
		/* smbfs_down() */
		fscb->fscb_down(CPTOSS(co));
	}
	SMB_VC_UNLOCK(vcp);
#endif /* NEED_SMBFS_CALLBACKS */
}
669 
/*
 * Notify smbfs (when loaded) that this VC's server is
 * responding again, for each share on the VC.  A no-op
 * unless built with NEED_SMBFS_CALLBACKS.
 */
/*ARGSUSED*/
void
smb_iod_notify_up(struct smb_vc *vcp)
{
#ifdef NEED_SMBFS_CALLBACKS
	struct smb_connobj *co;

	if (fscb == NULL)
		return;

	/*
	 * Walk the share list, notify...
	 * Was: smbfs_up(...share->ss_mount);
	 * XXX: Ok to hold vc_lock here?
	 */
	SMB_VC_LOCK(vcp);
	SLIST_FOREACH(co, &(VCTOCP(vcp)->co_children), co_next) {
		/* smbfs_up() */
		fscb->fscb_up(CPTOSS(co));
	}
	SMB_VC_UNLOCK(vcp);
#endif /* NEED_SMBFS_CALLBACKS */
}
693 
694 /*
695  * The IOD thread is now just a "reader",
696  * so no more smb_iod_request().  Yea!
697  */
698 
/*
 * Place request in the queue, and send it now if possible.
 * Called with no locks held.
 *
 * Internal requests (SMBR_INTERNAL) are inserted at the head
 * of the list and sent directly by the calling thread; normal
 * requests go to the tail and are sent via smb_iod_sendall(),
 * which enforces the vc_maxmux limit.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int error, save_newrq;

	SMBIODEBUG("entry, mid=%d\n", rqp->sr_mid);

	ASSERT(rqp->sr_cred);

	/* This helps a little with debugging. */
	rqp->sr_owner = curthread;

	if (rqp->sr_flags & SMBR_INTERNAL) {
		/*
		 * This is some kind of internal request,
		 * i.e. negotiate, session setup, echo...
		 * Allow vc_state < SMBIOD_ST_VCACTIVE, and
		 * always send directly from this thread.
		 * May be called by the IOD thread (echo).
		 * Note lock order: iod_rqlist, vc_sendlock
		 */
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
			/*
			 * We're signing requests and verifying
			 * signatures on responses.  Set the
			 * sequence numbers of the request and
			 * response here, used in smb_rq_verify.
			 */
			rqp->sr_seqno = vcp->vc_seqno++;
			rqp->sr_rseqno = vcp->vc_seqno++;
		}
		TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link);
		rw_downgrade(&vcp->iod_rqlock);

		/*
		 * Note: iod_sendrq expects vc_sendlock,
		 * so take that here, but carefully:
		 * Never block the IOD thread here.
		 */
		if (curthread == vcp->iod_thr) {
			if (sema_tryp(&vcp->vc_sendlock) == 0) {
				SMBIODEBUG("sendlock busy\n");
				error = EAGAIN;
			} else {
				/* Have vc_sendlock */
				error = smb_iod_sendrq(rqp);
				sema_v(&vcp->vc_sendlock);
			}
		} else {
			sema_p(&vcp->vc_sendlock);
			error = smb_iod_sendrq(rqp);
			sema_v(&vcp->vc_sendlock);
		}

		rw_exit(&vcp->iod_rqlock);
		/* On send failure, unlink the request again. */
		if (error)
			smb_iod_removerq(rqp);

		return (error);
	}

	/*
	 * Normal request from the driver or smbfs.
	 * State should be correct after the check in
	 * smb_rq_enqueue(), but we dropped locks...
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
		/*
		 * We're signing requests and verifying
		 * signatures on responses.  Set the
		 * sequence numbers of the request and
		 * response here, used in smb_rq_verify.
		 */
		rqp->sr_seqno = vcp->vc_seqno++;
		rqp->sr_rseqno = vcp->vc_seqno++;
	}
	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);

	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just put on the list.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}
807 
/*
 * Mark an SMBR_MULTIPACKET request as
 * needing another send.  Similar to the
 * "normal" part of smb_iod_addrq.
 *
 * Returns EINVAL for internal requests (those are sent
 * synchronously) and ENOTCONN when the VC is not active.
 */
int
smb_iod_multirq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int save_newrq;

	ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);

	if (rqp->sr_flags & SMBR_INTERNAL)
		return (EINVAL);

	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	/* Already on iod_rqlist, just reset state. */
	rqp->sr_state = SMBRQ_NOTSENT;

	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just marked NOTSENT.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}
851 
852 
/*
 * Remove a request from the VC's outstanding request list.
 * Always returns zero.
 */
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;

	SMBIODEBUG("entry, mid=%d\n", rqp->sr_mid);

	rw_enter(&vcp->iod_rqlock, RW_WRITER);
#ifdef QUEUEDEBUG
	/*
	 * Make sure we have not already removed it.
	 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
	 * XXX: Don't like the constant 1 here...
	 */
	ASSERT(rqp->sr_link.tqe_next != (void *)1L);
#endif
	TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
	rw_exit(&vcp->iod_rqlock);

	return (0);
}
874 
875 
/*
 * Internal version of smb_iod_waitrq.
 *
 * This is used when there is no reader thread,
 * so we have to do the recv here.  The request
 * must have the SMBR_INTERNAL flag set.
 *
 * Loops in recv, dropping non-matching responses, until
 * the response for this request arrives (matched by MID,
 * or by command as a fallback).  Returns zero on success
 * or an errno from the receive path.
 */
static int
smb_iod_waitrq_internal(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	mblk_t *m;
	uchar_t *hp;
	int error;
	uint16_t mid;
	uint8_t cmd;

	/* Make sure it's an internal request. */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0) {
		SMBIODEBUG("not internal\n");
		return (EINVAL);
	}

	/* Only simple requests allowed. */
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		SMBIODEBUG("multipacket\n");
		return (EINVAL);
	}

	/* Should not already have a response. */
	if (rqp->sr_rp.md_top) {
		DEBUG_ENTER("smb_iod_waitrq again?\n");
		return (0);
	}

	/*
	 * The message recv loop.  Terminates when we
	 * receive the message we're looking for.
	 * Drop others, with complaints.
	 * Scaled-down version of smb_iod_recvall
	 */
	for (;;) {
		m = NULL;
		error = smb_iod_recv1(vcp, &m);
		if (error) {
			/*
			 * It's dangerous to continue here.
			 * (possible infinite loop!)
			 */
#if 0
			if (SMB_TRAN_FATAL(vcp, error)) {
				return (error);
			}
			continue;
#endif
			return (error);
		}

		hp = mtod(m, uchar_t *);
		cmd = SMB_HDRCMD(hp);
		/*LINTED*/
		mid = SMB_HDRMID(hp);

		SMBIODEBUG("cmd 0x%02x mid %04x\n",
		    (uint_t)cmd, (uint_t)mid);
		m_dumpm(m);

		/*
		 * Normally, the MID will match.
		 * For internal requests, also
		 * match on the cmd to be safe.
		 */
		if (mid == rqp->sr_mid)
			break;
		if (cmd == rqp->sr_cmd) {
			SMBIODEBUG("cmd match but not mid!\n");
			break;
		}

		/* Not ours; toss it and keep listening. */
		SMBIODEBUG("drop nomatch\n");
		m_freem(m);
	}

	/*
	 * Have the response we were waiting for.
	 * Simplified version of the code from
	 * smb_iod_recvall
	 */
	SMBRQ_LOCK(rqp);
	if (rqp->sr_rp.md_top == NULL) {
		md_initm(&rqp->sr_rp, m);
	} else {
		SMBIODEBUG("drop duplicate\n");
		m_freem(m);
	}
	SMBRQ_UNLOCK(rqp);

	return (0);
}
975 
976 
/*
 * Wait for a request to complete.
 *
 * For internal requests, see smb_iod_waitrq_internal.
 * For normal requests, we need to deal with
 * ioc_muxcnt dropping below vc_maxmux by
 * making arrangements to send more...
 *
 * Returns zero with the response available in the request,
 * or EINTR (signal), ETIME (response timeout), or the
 * error recorded on the request by the send/recv paths.
 */
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	clock_t tr, tmo1, tmo2;
	int error, rc;

	SMBIODEBUG("entry, cmd=0x%02x mid=0x%04x\n",
	    (uint_t)rqp->sr_cmd, (uint_t)rqp->sr_mid);

	if (rqp->sr_flags & SMBR_INTERNAL) {
		ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
		error = smb_iod_waitrq_internal(rqp);
		smb_iod_removerq(rqp);
		return (error);
	}

	/*
	 * Make sure this is NOT the IOD thread,
	 * or the wait below will always timeout.
	 */
	ASSERT(curthread != vcp->iod_thr);

	/* Tells the IOD's recv loop someone is waiting. */
	atomic_inc_uint(&vcp->iod_rqwaiting);
	SMBRQ_LOCK(rqp);

	/*
	 * First, wait for the request to be sent.  Normally the send
	 * has already happened by the time we get here.  However, if
	 * we have more than maxmux entries in the request list, our
	 * request may not be sent until other requests complete.
	 * The wait in this case is due to local I/O demands, so
	 * we don't want the server response timeout to apply.
	 *
	 * If a request is allowed to interrupt this wait, then the
	 * request is cancelled and never sent OTW.  Some kinds of
	 * requests should never be cancelled (i.e. close) and those
	 * are marked SMBR_NOINTR_SEND so they either go eventually,
	 * or a connection close will terminate them with ENOTCONN.
	 */
	while (rqp->sr_state == SMBRQ_NOTSENT) {
		rqp->sr_flags |= SMBR_SENDWAIT;
		if (rqp->sr_flags & SMBR_NOINTR_SEND) {
			cv_wait(&rqp->sr_cond, &rqp->sr_lock);
			rc = 1;
		} else
			rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock);
		rqp->sr_flags &= ~SMBR_SENDWAIT;
		if (rc == 0) {
			/* cv_wait_sig returned zero: interrupted. */
			SMBIODEBUG("EINTR in sendwait, mid=%u\n", rqp->sr_mid);
			error = EINTR;
			goto out;
		}
	}

	/*
	 * The request has been sent.  Now wait for the response,
	 * with the timeout specified for this request.
	 * Compute all the deadlines now, so we effectively
	 * start the timer(s) after the request is sent.
	 */
	if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
		tmo1 = lbolt + SEC_TO_TICK(smb_timo_notice);
	else
		tmo1 = 0;
	tmo2 = lbolt + SEC_TO_TICK(rqp->sr_timo);

	/*
	 * As above, we don't want to allow interrupt for some
	 * requests like open, because we could miss a succesful
	 * response and therefore "leak" a FID.  Such requests
	 * are marked SMBR_NOINTR_RECV to prevent that.
	 *
	 * If "slow server" warnings are enabled, wait first
	 * for the "notice" timeout, and warn if expired.
	 */
	if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		/* tr < 0 means the notice timeout expired. */
		if (tr < 0) {
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq1,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (still waiting).\n", vcp->vc_srvname,
			    rqp->sr_mid, smb_timo_notice);
#endif
		}
	}

	/*
	 * Keep waiting until tmo2 is expired.
	 */
	while (rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		/* tr < 0 means the full response timeout expired. */
		if (tr < 0) {
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq2,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (giving up).\n", vcp->vc_srvname,
			    rqp->sr_mid, rqp->sr_timo);
#endif
			error = ETIME;
			goto out;
		}
		/* got wakeup */
	}
	error = rqp->sr_lerror;
	rqp->sr_rplast++;

out:
	SMBRQ_UNLOCK(rqp);
	atomic_dec_uint(&vcp->iod_rqwaiting);

	/*
	 * MULTIPACKET request must stay in the list.
	 * They may need additional responses.
	 */
	if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
		smb_iod_removerq(rqp);

	/*
	 * Some request has been completed.
	 * If we reached the mux limit,
	 * re-run the send loop...
	 */
	if (vcp->iod_muxfull)
		smb_iod_sendall(vcp);

	return (error);
}
1142 
/*
 * Shutdown all outstanding I/O requests on the specified share with
 * EIO; used when unmounting a share.  (There shouldn't be any for a
 * non-forced unmount; if this is a forced unmount, we have to shutdown
 * the requests as part of the unmount process.)
 */
void
smb_iod_shutdown_share(struct smb_share *ssp)
{
	struct smb_vc *vcp = SSTOVC(ssp);
	struct smb_rq *rqp;

	/*
	 * Loop through the list of requests and shutdown the ones
	 * that are for the specified share.
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		/* Fail with EIO any not-yet-notified request on this share. */
		if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
			smb_iod_rqprocessed(rqp, EIO, 0);
	}
	rw_exit(&vcp->iod_rqlock);
}
1166 
1167 /*
1168  * Send all requests that need sending.
1169  * Called from _addrq, _multirq, _waitrq
1170  */
static void
smb_iod_sendall(struct smb_vc *vcp)
{
	struct smb_rq *rqp;
	int error, save_newrq, muxcnt;

	/*
	 * Atomically grab-and-clear iod_newrq under the writer
	 * lock.  Clearing it guarantees that any thread which
	 * adds a new request after this point will run this
	 * function again, so no request is left unsent.
	 */
	rw_enter(&vcp->iod_rqlock, RW_WRITER);
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq = 0;

	/*
	 * From here on we only read iod_rqlist, so downgrade
	 * to a reader lock.  This allows the IOD to handle
	 * responses while some requesting thread may be
	 * blocked in send.
	 */
	rw_downgrade(&vcp->iod_rqlock);

	/* Expect to find about this many requests. */
	SMBIODEBUG("top, save_newrq=%d\n", save_newrq);

	/*
	 * Serialize to prevent multiple senders.
	 * Note lock order: iod_rqlock, vc_sendlock
	 */
	sema_p(&vcp->vc_sendlock);

	/*
	 * Walk the list of requests and send when possible.
	 * We avoid having more than vc_maxmux requests
	 * outstanding to the server by traversing only
	 * vc_maxmux entries into this list.  Simple!
	 */
	ASSERT(vcp->vc_maxmux > 0);
	error = muxcnt = 0;
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		/* A dead connection aborts the whole walk. */
		if (vcp->vc_state == SMBIOD_ST_DEAD) {
			error = ENOTCONN; /* stop everything! */
			break;
		}

		/* Only requests not yet on the wire need sending. */
		if (rqp->sr_state == SMBRQ_NOTSENT) {
			error = smb_iod_sendrq(rqp);
			if (error)
				break;
		}

		/* Stop once vc_maxmux requests are in flight. */
		if (++muxcnt == vcp->vc_maxmux) {
			SMBIODEBUG("muxcnt == vc_maxmux\n");
			break;
		}

	}

	/*
	 * If we have vc_maxmux requests outstanding,
	 * arrange for _waitrq to call _sendall as
	 * requests are completed.
	 */
	vcp->iod_muxfull =
	    (muxcnt < vcp->vc_maxmux) ? 0 : 1;

	/* Release in the reverse of acquisition order. */
	sema_v(&vcp->vc_sendlock);
	rw_exit(&vcp->iod_rqlock);

	/* Must call smb_iod_dead() with no locks held. */
	if (error == ENOTCONN)
		smb_iod_dead(vcp);

}
1244 
1245 
1246 /*
1247  * "main" function for smbiod daemon thread
1248  */
1249 void
1250 smb_iod_main(struct smb_vc *vcp)
1251 {
1252 	kthread_t *thr = curthread;
1253 
1254 	SMBIODEBUG("entry\n");
1255 
1256 	SMBIODEBUG("Running, thr=0x%p\n", thr);
1257 
1258 	/*
1259 	 * Prevent race with thread that created us.
1260 	 * After we get this lock iod_thr is set.
1261 	 */
1262 	SMB_VC_LOCK(vcp);
1263 	ASSERT(thr == vcp->iod_thr);
1264 
1265 	/* Redundant with iod_thr, but may help debugging. */
1266 	vcp->iod_flags |= SMBIOD_RUNNING;
1267 	SMB_VC_UNLOCK(vcp);
1268 
1269 	/*
1270 	 * OK, this is a new reader thread.
1271 	 * In case of reconnect, tell any
1272 	 * old requests they can restart.
1273 	 */
1274 	smb_iod_invrq(vcp);
1275 
1276 	/*
1277 	 * Run the "reader" loop.
1278 	 */
1279 	smb_iod_recvall(vcp);
1280 
1281 	/*
1282 	 * The reader loop function returns only when
1283 	 * there's been a fatal error on the connection.
1284 	 */
1285 	smb_iod_dead(vcp);
1286 
1287 	/*
1288 	 * The reader thread is going away.  Clear iod_thr,
1289 	 * and wake up anybody waiting for us to quit.
1290 	 */
1291 	SMB_VC_LOCK(vcp);
1292 	vcp->iod_flags &= ~SMBIOD_RUNNING;
1293 	vcp->iod_thr = NULL;
1294 	cv_broadcast(&vcp->iod_exit);
1295 	SMB_VC_UNLOCK(vcp);
1296 
1297 	/*
1298 	 * This hold was taken in smb_iod_create()
1299 	 * when this thread was created.
1300 	 */
1301 	smb_vc_rele(vcp);
1302 
1303 	SMBIODEBUG("Exiting, p=0x%p\n", curproc);
1304 	zthread_exit();
1305 }
1306 
1307 /*
1308  * Create the reader thread.
1309  *
1310  * This happens when we are just about to
1311  * enter vc_state = SMBIOD_ST_VCACTIVE;
1312  * See smb_sm_ssnsetup()
1313  */
1314 int
1315 smb_iod_create(struct smb_vc *vcp)
1316 {
1317 	kthread_t *thr = NULL;
1318 	int error;
1319 
1320 	/*
1321 	 * Take a hold on the VC for the IOD thread.
1322 	 * This hold will be released when the IOD
1323 	 * thread terminates. (or on error below)
1324 	 */
1325 	smb_vc_hold(vcp);
1326 
1327 	SMB_VC_LOCK(vcp);
1328 
1329 	if (vcp->iod_thr != NULL) {
1330 		SMBIODEBUG("aready have an IOD?");
1331 		error = EIO;
1332 		goto out;
1333 	}
1334 
1335 	/*
1336 	 * Darwin code used: IOCreateThread(...)
1337 	 * In Solaris, we use...
1338 	 */
1339 	thr = zthread_create(
1340 	    NULL,	/* stack */
1341 	    0, /* stack size (default) */
1342 	    smb_iod_main, /* entry func... */
1343 	    vcp, /* ... and arg */
1344 	    0, /* len (of what?) */
1345 	    minclsyspri); /* priority */
1346 	if (thr == NULL) {
1347 		SMBERROR("can't start smbiod\n");
1348 		error = ENOMEM;
1349 		goto out;
1350 	}
1351 
1352 	/* Success! */
1353 	error = 0;
1354 	vcp->iod_thr = thr;
1355 
1356 out:
1357 	SMB_VC_UNLOCK(vcp);
1358 
1359 	if (error)
1360 		smb_vc_rele(vcp);
1361 
1362 	return (error);
1363 }
1364 
1365 /*
1366  * Called from smb_vc_free to do any
1367  * cleanup of our IOD (reader) thread.
1368  */
int
smb_iod_destroy(struct smb_vc *vcp)
{
	clock_t tmo;

	/*
	 * Let's try to make sure the IOD thread
	 * goes away, by waiting for it to exit.
	 * Normally, it's gone by now.
	 *
	 * Only wait for a second, because we're in the
	 * teardown path and don't want to get stuck here.
	 * Should not take long, or things are hosed...
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->iod_thr) {
		vcp->iod_flags |= SMBIOD_SHUTDOWN;
		/* Absolute deadline: one second from now. */
		tmo = lbolt + hz;
		/*
		 * smb_iod_main() clears iod_thr and broadcasts
		 * iod_exit just before it terminates; -1 here
		 * means we timed out instead.
		 */
		tmo = cv_timedwait(&vcp->iod_exit, &vcp->vc_lock, tmo);
		if (tmo == -1) {
			SMBERROR("IOD thread for %s did not exit?\n",
			    vcp->vc_srvname);
		}
	}
	/* Re-check: the wait may have timed out (or been skipped). */
	if (vcp->iod_thr) {
		/* This should not happen. */
		SMBIODEBUG("IOD thread did not exit!\n");
		/* Try harder? */
		tsignal(vcp->iod_thr, SIGKILL);
	}
	SMB_VC_UNLOCK(vcp);

	return (0);
}
1403