xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c (revision 8329232e00f1048795bae53acb230316243aadb5)
1 /*
2  * Copyright (c) 2000-2001 Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
33  */
34 
35 /*
36  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
37  * Use is subject to license terms.
38  *
39  * Copyright 2017 Nexenta Systems, Inc.  All rights reserved.
40  */
41 
42 #ifdef DEBUG
43 /* See sys/queue.h */
44 #define	QUEUEDEBUG 1
45 #endif
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/atomic.h>
50 #include <sys/proc.h>
51 #include <sys/thread.h>
52 #include <sys/file.h>
53 #include <sys/kmem.h>
54 #include <sys/unistd.h>
55 #include <sys/mount.h>
56 #include <sys/vnode.h>
57 #include <sys/types.h>
58 #include <sys/ddi.h>
59 #include <sys/sunddi.h>
60 #include <sys/stream.h>
61 #include <sys/strsun.h>
62 #include <sys/time.h>
63 #include <sys/class.h>
64 #include <sys/disp.h>
65 #include <sys/cmn_err.h>
66 #include <sys/zone.h>
67 #include <sys/sdt.h>
68 
69 #include <netsmb/smb_osdep.h>
70 
71 #include <netsmb/smb.h>
72 #include <netsmb/smb_conn.h>
73 #include <netsmb/smb_rq.h>
74 #include <netsmb/smb_subr.h>
75 #include <netsmb/smb_tran.h>
76 #include <netsmb/smb_trantcp.h>
77 
78 int smb_iod_send_echo(smb_vc_t *);
79 
80 #ifdef	_FAKE_KERNEL
81 extern void tsignal(kthread_t *, int);
82 #endif
83 
/*
 * This is set/cleared when smbfs loads/unloads
 * No locks should be necessary, because smbfs
 * can't unload until all the mounts are gone.
 */
static smb_fscb_t *fscb;

/*
 * Install (cb != NULL) or clear (cb == NULL) the callback
 * vector used to notify smbfs of connection state changes.
 * The vector is consulted via the file-scope "fscb" above
 * (see fscb_disconn / fscb_down / fscb_up / fscb_connect uses
 * elsewhere in this file).
 */
void
smb_fscb_set(smb_fscb_t *cb)
{
	fscb = cb;
}
95 
96 static void
97 smb_iod_share_disconnected(smb_share_t *ssp)
98 {
99 
100 	smb_share_invalidate(ssp);
101 
102 	/* smbfs_dead() */
103 	if (fscb && fscb->fscb_disconn) {
104 		fscb->fscb_disconn(ssp);
105 	}
106 }
107 
/*
 * State changes are important and infrequent.
 * Make them easily observable via dtrace.
 */
void
smb_iod_newstate(struct smb_vc *vcp, int state)
{
	/*
	 * Callers hold SMB_VC_LOCK(vcp) around this and usually
	 * cv_broadcast(&vcp->vc_statechg) afterward so waiters
	 * (e.g. smb_iod_reconnect) see the transition.
	 *
	 * NOTE(review): despite the comment above, there is no
	 * explicit SDT probe here; presumably observability relies
	 * on fbt tracing of this function -- confirm.
	 */
	vcp->vc_state = state;
}
117 
/*
 * Lock Held version of the next function.
 *
 * Mark request rqp "processed": merge in the caller's flags,
 * record the completion error, bump the reply generation
 * (sr_rpgen) so smb_iod_waitrq can tell a new response has
 * arrived, set NOTIFIED state, and wake all waiters.
 * Caller must hold SMBRQ_LOCK(rqp).
 */
static inline void
smb_iod_rqprocessed_LH(
	struct smb_rq *rqp,
	int error,
	int flags)
{
	rqp->sr_flags |= flags;
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	cv_broadcast(&rqp->sr_cond);
}
131 
/*
 * Mark request rqp "processed" with the given error/flags,
 * taking the request lock around the state update.
 * Safe to call with no locks held.
 */
static void
smb_iod_rqprocessed(
	struct smb_rq *rqp,
	int error,
	int flags)
{

	SMBRQ_LOCK(rqp);
	smb_iod_rqprocessed_LH(rqp, error, flags);
	SMBRQ_UNLOCK(rqp);
}
143 
144 static void
145 smb_iod_invrq(struct smb_vc *vcp)
146 {
147 	struct smb_rq *rqp;
148 
149 	/*
150 	 * Invalidate all outstanding requests for this connection
151 	 */
152 	rw_enter(&vcp->iod_rqlock, RW_READER);
153 	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
154 		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
155 	}
156 	rw_exit(&vcp->iod_rqlock);
157 }
158 
/*
 * Called by smb_vc_rele, smb_vc_kill, and by the driver
 * close entry point if the IOD closes its dev handle.
 *
 * Forcibly kill the connection and IOD.
 */
void
smb_iod_disconnect(struct smb_vc *vcp)
{

	/*
	 * Inform everyone of the state change.
	 * Once DEAD, the state never changes again,
	 * so only transition (and broadcast) once.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->vc_state != SMBIOD_ST_DEAD) {
		smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
		cv_broadcast(&vcp->vc_statechg);
	}
	SMB_VC_UNLOCK(vcp);

	/*
	 * Let's be safe here and avoid doing any
	 * call across the network while trying to
	 * shut things down.  If we just disconnect,
	 * the server will take care of the logoff.
	 */
	SMB_TRAN_DISCONNECT(vcp);
}
187 
/*
 * Send one request.
 *
 * Called by _addrq (for internal requests)
 * and _sendall (via _addrq, _multirq, _waitrq)
 *
 * Caller holds vc_sendlock (serializes senders and the MID /
 * signing sequence increments) and iod_rqlock as reader, as
 * asserted below.
 *
 * Returns 0 when the request was sent, or failed in a way
 * that permits a later retry; ENOTCONN when the connection
 * is down or the request has exhausted its send attempts.
 */
static int
smb_iod_sendrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	mblk_t *m;
	int error;

	ASSERT(vcp);
	ASSERT(SEMA_HELD(&vcp->vc_sendlock));
	ASSERT(RW_READ_HELD(&vcp->iod_rqlock));

	/*
	 * Note: Anything special for SMBR_INTERNAL here?
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}


	/*
	 * On the first send, set the MID and (maybe)
	 * the signing sequence numbers.  The increments
	 * here are serialized by vc_sendlock
	 */
	if (rqp->sr_sendcnt == 0) {

		rqp->sr_mid = vcp->vc_next_mid++;

		if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
			/*
			 * We're signing requests and verifying
			 * signatures on responses.  Set the
			 * sequence numbers of the request and
			 * response here, used in smb_rq_verify.
			 */
			rqp->sr_seqno = vcp->vc_next_seq++;
			rqp->sr_rseqno = vcp->vc_next_seq++;
		}

		/* Fill in UID, TID, MID, etc. */
		smb_rq_fillhdr(rqp);

		/*
		 * Sign the message now that we're finally done
		 * filling in the SMB header fields, etc.
		 */
		if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
			smb_rq_sign(rqp);
		}
	}
	/*
	 * Too many send attempts already?  Give up and make
	 * the owner restart this request (SMBR_RESTART).
	 */
	if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */
		smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return (ENOTCONN);
	}

	/*
	 * Replaced m_copym() with Solaris copymsg() which does the same
	 * work when we want to do a M_COPYALL.
	 * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
	 *
	 * Send a copy so the original message stays attached to
	 * rqp, available for a possible retransmission.
	 */
	m = copymsg(rqp->sr_rq.mb_top);

#ifdef DTRACE_PROBE
	DTRACE_PROBE2(smb_iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), m);
#else
	SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
#endif
	m_dumpm(m);

	if (m != NULL) {
		error = SMB_TRAN_SEND(vcp, m);
		m = 0; /* consumed by SEND */
	} else
		error = ENOBUFS;

	rqp->sr_lerror = error;
	if (error == 0) {
		/* Sent OK; wake anyone blocked in SENDWAIT. */
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		if (rqp->sr_flags & SMBR_SENDWAIT)
			cv_broadcast(&rqp->sr_cond);
		SMBRQ_UNLOCK(rqp);
		return (0);
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		return (ENOTCONN);
	}
	if (error)
		SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error);

#ifdef APPLE
	/* If proc waiting on rqp was signaled... */
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR, 0);
#endif

	/*
	 * Non-fatal send error: report success so the request
	 * stays NOTSENT and will be attempted again.
	 */
	return (0);
}
306 
307 static int
308 smb_iod_recv1(struct smb_vc *vcp, mblk_t **mpp)
309 {
310 	mblk_t *m;
311 	uchar_t *hp;
312 	int error;
313 
314 top:
315 	m = NULL;
316 	error = SMB_TRAN_RECV(vcp, &m);
317 	if (error == EAGAIN)
318 		goto top;
319 	if (error)
320 		return (error);
321 	ASSERT(m);
322 
323 	m = m_pullup(m, SMB_HDRLEN);
324 	if (m == NULL) {
325 		return (ENOSR);
326 	}
327 
328 	/*
329 	 * Check the SMB header
330 	 */
331 	hp = mtod(m, uchar_t *);
332 	if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
333 		m_freem(m);
334 		return (EPROTO);
335 	}
336 
337 	*mpp = m;
338 	return (0);
339 }
340 
/*
 * Process incoming packets
 *
 * This is the "reader" loop, run by the IOD thread
 * while in state SMBIOD_ST_VCACTIVE.  The loop now
 * simply blocks in the socket recv until either a
 * message arrives, or a disconnect.
 *
 * Any non-zero error means the IOD should terminate.
 */
int
smb_iod_recvall(struct smb_vc *vcp)
{
	struct smb_rq *rqp;
	mblk_t *m;
	uchar_t *hp;
	ushort_t mid;
	int error = 0;
	int etime_count = 0; /* for "server not responding", etc. */

	for (;;) {
		/*
		 * Check whether someone "killed" this VC,
		 * or is asking the IOD to terminate.
		 */

		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
			error = 0;
			break;
		}

		if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
			SMBIODEBUG("SHUTDOWN set\n");
			/* This IOD thread will terminate. */
			SMB_VC_LOCK(vcp);
			smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			error = EINTR;
			break;
		}

		m = NULL;
		error = smb_iod_recv1(vcp, &m);

		if (error == ETIME &&
		    vcp->iod_rqlist.tqh_first != NULL) {
			/*
			 * Nothing received for 15 seconds and
			 * we have requests in the queue.
			 */
			etime_count++;

			/*
			 * Once, at 15 sec. notify callbacks
			 * and print the warning message.
			 */
			if (etime_count == 1) {
				/* Was: smb_iod_notify_down(vcp); */
				if (fscb && fscb->fscb_down)
					smb_vc_walkshares(vcp,
					    fscb->fscb_down);
				zprintf(vcp->vc_zoneid,
				    "SMB server %s not responding\n",
				    vcp->vc_srvname);
			}

			/*
			 * At 30 sec. try sending an echo, and then
			 * once a minute thereafter.
			 * ((count & 3) == 2 matches counts 2, 6, 10...
			 * i.e. every fourth 15-sec. interval after
			 * the first echo at 30 sec.)
			 */
			if ((etime_count & 3) == 2) {
				(void) smb_iod_send_echo(vcp);
			}

			continue;
		} /* ETIME && requests in queue */

		if (error == ETIME) {
			/*
			 * Recv timed out with an empty request queue.
			 * If the IOD thread holds the last reference
			 * to this VC, let the IOD thread terminate.
			 */
			if (vcp->vc_co.co_usecount > 1)
				continue;
			/* Re-check the count under the VC lock. */
			SMB_VC_LOCK(vcp);
			if (vcp->vc_co.co_usecount == 1) {
				smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
				SMB_VC_UNLOCK(vcp);
				error = 0;
				break;
			}
			SMB_VC_UNLOCK(vcp);
			continue;
		} /* error == ETIME */

		if (error) {
			/*
			 * The recv. above returned some error
			 * we can't continue from i.e. ENOTCONN.
			 * It's dangerous to continue here.
			 * (possible infinite loop!)
			 *
			 * If we have requests enqueued, next
			 * state is reconnecting, else idle.
			 */
			int state;
			SMB_VC_LOCK(vcp);
			state = (vcp->iod_rqlist.tqh_first != NULL) ?
			    SMBIOD_ST_RECONNECT : SMBIOD_ST_IDLE;
			smb_iod_newstate(vcp, state);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			error = 0;
			break;
		}

		/*
		 * Received something.  Yea!
		 * If we had been complaining about the server,
		 * log recovery and notify callbacks it's back.
		 */
		if (etime_count) {
			etime_count = 0;

			zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
			    vcp->vc_srvname);

			/* Was: smb_iod_notify_up(vcp); */
			if (fscb && fscb->fscb_up)
				smb_vc_walkshares(vcp, fscb->fscb_up);
		}

		/*
		 * Have an SMB packet.  The SMB header was
		 * checked in smb_iod_recv1().
		 * Find the request...
		 */
		hp = mtod(m, uchar_t *);
		/*LINTED*/
		mid = letohs(SMB_HDRMID(hp));
		SMBIODEBUG("mid %04x\n", (uint_t)mid);

		rw_enter(&vcp->iod_rqlock, RW_READER);
		TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

			if (rqp->sr_mid != mid)
				continue;

			DTRACE_PROBE2(smb_iod_recvrq,
			    (smb_rq_t *), rqp, (mblk_t *), m);
			m_dumpm(m);

			/*
			 * Attach the response to the request:
			 * first response initializes sr_rp;
			 * later ones are appended only for
			 * MULTIPACKET requests, else dropped.
			 */
			SMBRQ_LOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_UNLOCK(rqp);
					SMBSDEBUG("duplicate response %d "
					    "(ignored)\n", mid);
					break;
				}
			}
			smb_iod_rqprocessed_LH(rqp, 0, 0);
			SMBRQ_UNLOCK(rqp);
			break;
		}

		/*
		 * No matching request: drop the message.
		 * (Echo responses are expected to be unmatched
		 * because echo uses SMBNOREPLYWAIT.)
		 */
		if (rqp == NULL) {
			int cmd = SMB_HDRCMD(hp);

			if (cmd != SMB_COM_ECHO)
				SMBSDEBUG("drop resp: mid %d, cmd %d\n",
				    (uint_t)mid, cmd);
/*			smb_printrqlist(vcp); */
			m_freem(m);
		}
		rw_exit(&vcp->iod_rqlock);

	}

	return (error);
}
526 
527 /*
528  * The IOD receiver thread has requests pending and
529  * has not received anything in a while.  Try to
530  * send an SMB echo request.  It's tricky to do a
531  * send from the IOD thread because we can't block.
532  *
533  * Using tmo=SMBNOREPLYWAIT in the request
534  * so smb_rq_reply will skip smb_iod_waitrq.
535  * The smb_smb_echo call uses SMBR_INTERNAL
536  * to avoid calling smb_iod_sendall().
537  */
538 int
539 smb_iod_send_echo(smb_vc_t *vcp)
540 {
541 	smb_cred_t scred;
542 	int err;
543 
544 	smb_credinit(&scred, NULL);
545 	err = smb_smb_echo(vcp, &scred, SMBNOREPLYWAIT);
546 	smb_credrele(&scred);
547 	return (err);
548 }
549 
550 /*
551  * The IOD thread is now just a "reader",
552  * so no more smb_iod_request().  Yea!
553  */
554 
/*
 * Place request in the queue, and send it now if possible.
 * Called with no locks held.
 *
 * Returns 0 once the request is queued (normal path), or the
 * send error for internal (IOD-issued) requests, which are
 * removed from the queue again on failure.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int error, save_newrq;

	ASSERT(rqp->sr_cred);

	/*
	 * State should be correct after the check in
	 * smb_rq_enqueue(), but we dropped locks...
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	/*
	 * Requests from the IOD itself are marked _INTERNAL,
	 * and get some special treatment to avoid blocking
	 * the reader thread (so we don't deadlock).
	 * The request is not yet on the queue, so we can
	 * modify it's state here without locks.
	 * Only thing using this now is ECHO.
	 */
	rqp->sr_owner = curthread;
	if (rqp->sr_owner == vcp->iod_thr) {
		rqp->sr_flags |= SMBR_INTERNAL;

		/*
		 * This is a request from the IOD thread.
		 * Always send directly from this thread.
		 * Note lock order: iod_rqlist, vc_sendlock
		 */
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link);
		rw_downgrade(&vcp->iod_rqlock);

		/*
		 * Note: iod_sendrq expects vc_sendlock,
		 * so take that here, but carefully:
		 * Never block the IOD thread here.
		 * (sema_tryp returns zero when busy.)
		 */
		if (sema_tryp(&vcp->vc_sendlock) == 0) {
			SMBIODEBUG("sendlock busy\n");
			error = EAGAIN;
		} else {
			/* Have vc_sendlock */
			error = smb_iod_sendrq(rqp);
			sema_v(&vcp->vc_sendlock);
		}

		rw_exit(&vcp->iod_rqlock);

		/*
		 * In the non-error case, _removerq
		 * is done by either smb_rq_reply
		 * or smb_iod_waitrq.
		 */
		if (error)
			smb_iod_removerq(rqp);

		return (error);
	}

	/* Normal path: enqueue at the tail. */
	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just put on the list.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}
644 
/*
 * Mark an SMBR_MULTIPACKET request as
 * needing another send.  Similar to the
 * "normal" part of smb_iod_addrq.
 *
 * Returns 0 after scheduling the next send, EINVAL for
 * internal requests (not supported here), or ENOTCONN
 * if the VC is no longer active.
 */
int
smb_iod_multirq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int save_newrq;

	ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);

	/* IOD-internal requests can't be multi-packet. */
	if (rqp->sr_flags & SMBR_INTERNAL)
		return (EINVAL);

	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	/* Already on iod_rqlist, just reset state. */
	rqp->sr_state = SMBRQ_NOTSENT;

	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just marked NOTSENT.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}
688 
689 
690 void
691 smb_iod_removerq(struct smb_rq *rqp)
692 {
693 	struct smb_vc *vcp = rqp->sr_vc;
694 
695 	rw_enter(&vcp->iod_rqlock, RW_WRITER);
696 #ifdef QUEUEDEBUG
697 	/*
698 	 * Make sure we have not already removed it.
699 	 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
700 	 * XXX: Don't like the constant 1 here...
701 	 */
702 	ASSERT(rqp->sr_link.tqe_next != (void *)1L);
703 #endif
704 	TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
705 	rw_exit(&vcp->iod_rqlock);
706 }
707 
708 
709 
/*
 * Wait for a request to complete.
 *
 * For normal requests, we need to deal with
 * ioc_muxcnt dropping below vc_maxmux by
 * making arrangements to send more...
 *
 * Returns sr_lerror on normal completion, EAGAIN for
 * internal requests (which never wait here), EINTR if
 * interrupted, or ETIME if the response deadline passes.
 */
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	clock_t tr, tmo1, tmo2;
	int error, rc;

	/* Internal requests never wait; remove and bail. */
	if (rqp->sr_flags & SMBR_INTERNAL) {
		ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
		smb_iod_removerq(rqp);
		return (EAGAIN);
	}

	/*
	 * Make sure this is NOT the IOD thread,
	 * or the wait below will stop the reader.
	 */
	ASSERT(curthread != vcp->iod_thr);

	SMBRQ_LOCK(rqp);

	/*
	 * First, wait for the request to be sent.  Normally the send
	 * has already happened by the time we get here.  However, if
	 * we have more than maxmux entries in the request list, our
	 * request may not be sent until other requests complete.
	 * The wait in this case is due to local I/O demands, so
	 * we don't want the server response timeout to apply.
	 *
	 * If a request is allowed to interrupt this wait, then the
	 * request is cancelled and never sent OTW.  Some kinds of
	 * requests should never be cancelled (i.e. close) and those
	 * are marked SMBR_NOINTR_SEND so they either go eventually,
	 * or a connection close will terminate them with ENOTCONN.
	 */
	while (rqp->sr_state == SMBRQ_NOTSENT) {
		rqp->sr_flags |= SMBR_SENDWAIT;
		if (rqp->sr_flags & SMBR_NOINTR_SEND) {
			cv_wait(&rqp->sr_cond, &rqp->sr_lock);
			rc = 1;
		} else
			rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock);
		rqp->sr_flags &= ~SMBR_SENDWAIT;
		/* rc == 0 means cv_wait_sig was interrupted. */
		if (rc == 0) {
			SMBIODEBUG("EINTR in sendwait, rq=%p\n", (void *)rqp);
			error = EINTR;
			goto out;
		}
	}

	/*
	 * The request has been sent.  Now wait for the response,
	 * with the timeout specified for this request.
	 * Compute all the deadlines now, so we effectively
	 * start the timer(s) after the request is sent.
	 * tmo1 = relative "slow server" notice timeout;
	 * tmo2 = absolute final deadline (lbolt-based).
	 */
	if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
		tmo1 = SEC_TO_TICK(smb_timo_notice);
	else
		tmo1 = 0;
	tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);

	/*
	 * As above, we don't want to allow interrupt for some
	 * requests like open, because we could miss a succesful
	 * response and therefore "leak" a FID.  Such requests
	 * are marked SMBR_NOINTR_RECV to prevent that.
	 *
	 * If "slow server" warnings are enabled, wait first
	 * for the "notice" timeout, and warn if expired.
	 * (sr_rpgen == sr_rplast means no new reply yet.)
	 */
	if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_reltimedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
		else
			tr = cv_reltimedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		/* tr < 0 is a timeout: notice only, keep waiting. */
		if (tr < 0) {
#ifdef DTRACE_PROBE1
			DTRACE_PROBE1(smb_iod_waitrq1,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (still waiting).\n", vcp->vc_srvname,
			    rqp->sr_mid, smb_timo_notice);
#endif
		}
	}

	/*
	 * Keep waiting until tmo2 is expired.
	 */
	while (rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		/* tr < 0: final deadline expired -- give up. */
		if (tr < 0) {
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq2,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (giving up).\n", vcp->vc_srvname,
			    rqp->sr_mid, rqp->sr_timo);
#endif
			error = ETIME;
			goto out;
		}
		/* got wakeup */
	}
	error = rqp->sr_lerror;
	/* Consume this reply generation. */
	rqp->sr_rplast++;

out:
	SMBRQ_UNLOCK(rqp);

	/*
	 * MULTIPACKET request must stay in the list.
	 * They may need additional responses.
	 */
	if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
		smb_iod_removerq(rqp);

	/*
	 * Some request has been completed.
	 * If we reached the mux limit,
	 * re-run the send loop...
	 */
	if (vcp->iod_muxfull)
		smb_iod_sendall(vcp);

	return (error);
}
868 
/*
 * Shutdown all outstanding I/O requests on the specified share with
 * EIO; used when unmounting a share.  (There shouldn't be any for a
 * non-forced unmount; if this is a forced unmount, we have to shutdown
 * the requests as part of the unmount process.)
 *
 * (The comment previously said ENXIO, but the code has always
 * passed EIO to smb_iod_rqprocessed below.)
 */
void
smb_iod_shutdown_share(struct smb_share *ssp)
{
	struct smb_vc *vcp = SSTOVC(ssp);
	struct smb_rq *rqp;

	/*
	 * Loop through the list of requests and shutdown the ones
	 * that are for the specified share.  Requests already in
	 * NOTIFIED state have completed and are left alone.
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
			smb_iod_rqprocessed(rqp, EIO, 0);
	}
	rw_exit(&vcp->iod_rqlock);
}
892 
/*
 * Send all requests that need sending.
 * Called from _addrq, _multirq, _waitrq
 */
void
smb_iod_sendall(smb_vc_t *vcp)
{
	struct smb_rq *rqp;
	int error, muxcnt;

	/*
	 * Clear "newrq" to make sure threads adding
	 * new requests will run this function again.
	 */
	rw_enter(&vcp->iod_rqlock, RW_WRITER);
	vcp->iod_newrq = 0;

	/*
	 * We only read iod_rqlist, so downgrade rwlock.
	 * This allows the IOD to handle responses while
	 * some requesting thread may be blocked in send.
	 */
	rw_downgrade(&vcp->iod_rqlock);

	/*
	 * Serialize to prevent multiple senders.
	 * Note lock order: iod_rqlock, vc_sendlock
	 */
	sema_p(&vcp->vc_sendlock);

	/*
	 * Walk the list of requests and send when possible.
	 * We avoid having more than vc_maxmux requests
	 * outstanding to the server by traversing only
	 * vc_maxmux entries into this list.  Simple!
	 */
	ASSERT(vcp->vc_maxmux > 0);
	error = muxcnt = 0;
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			error = ENOTCONN; /* stop everything! */
			break;
		}

		/* Only NOTSENT entries still need a send. */
		if (rqp->sr_state == SMBRQ_NOTSENT) {
			error = smb_iod_sendrq(rqp);
			if (error)
				break;
		}

		if (++muxcnt == vcp->vc_maxmux) {
			SMBIODEBUG("muxcnt == vc_maxmux\n");
			break;
		}

	}

	/*
	 * If we have vc_maxmux requests outstanding,
	 * arrange for _waitrq to call _sendall as
	 * requests are completed.
	 */
	vcp->iod_muxfull =
	    (muxcnt < vcp->vc_maxmux) ? 0 : 1;

	sema_v(&vcp->vc_sendlock);
	rw_exit(&vcp->iod_rqlock);
}
962 
/*
 * Main work function for the IOD thread while the VC has an
 * active connection: loan the transport file pointer to the
 * transport module, mark the VC active (waking any waiters),
 * then run the reader loop until disconnect or shutdown.
 * On the way out, notify shares and fail pending requests.
 *
 * Returns zero for a normal state transition, or an error
 * (e.g. EBADF, EINTR) that ends this IOD pass.
 */
int
smb_iod_vc_work(struct smb_vc *vcp, cred_t *cr)
{
	struct file *fp = NULL;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	/*
	 * Get the network transport file pointer,
	 * and "loan" it to our transport module.
	 */
	if ((fp = getf(vcp->vc_tran_fd)) == NULL) {
		err = EBADF;
		goto out;
	}
	if ((err = SMB_TRAN_LOAN_FP(vcp, fp, cr)) != 0)
		goto out;

	/*
	 * In case of reconnect, tell any enqueued requests
	 * then can GO!
	 */
	SMB_VC_LOCK(vcp);
	vcp->vc_genid++;	/* possibly new connection */
	smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
	cv_broadcast(&vcp->vc_statechg);
	SMB_VC_UNLOCK(vcp);

	/*
	 * The above cv_broadcast should be sufficient to
	 * get requests going again.
	 *
	 * If we have a callback function, run it.
	 * Was: smb_iod_notify_connected()
	 */
	if (fscb && fscb->fscb_connect)
		smb_vc_walkshares(vcp, fscb->fscb_connect);

	/*
	 * Run the "reader" loop.  (Blocks here until the
	 * connection drops or the IOD is told to stop.)
	 */
	err = smb_iod_recvall(vcp);

	/*
	 * The reader loop returned, so we must have a
	 * new state.  (disconnected or reconnecting)
	 *
	 * Notify shares of the disconnect.
	 * Was: smb_iod_notify_disconnect()
	 */
	smb_vc_walkshares(vcp, smb_iod_share_disconnected);

	/*
	 * The reader loop function returns only when
	 * there's been an error on the connection, or
	 * this VC has no more references.  It also
	 * updates the state before it returns.
	 *
	 * Tell any requests to give up or restart.
	 */
	smb_iod_invrq(vcp);

out:
	/* Recall the file descriptor loan. */
	(void) SMB_TRAN_LOAN_FP(vcp, NULL, cr);
	if (fp != NULL) {
		releasef(vcp->vc_tran_fd);
	}

	return (err);
}
1039 
/*
 * Wait around for someone to ask to use this VC.
 * If the VC has only the IOD reference, then
 * wait only a minute or so, then drop it.
 *
 * Returns zero when leaving the IDLE state normally
 * (either someone wants the VC or the IOD should die),
 * or EINTR if the wait was interrupted by a signal.
 */
int
smb_iod_vc_idle(struct smb_vc *vcp)
{
	/* Poll in 15-second chunks so the refcount is re-checked. */
	clock_t tr, delta = SEC_TO_TICK(15);
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	SMB_VC_LOCK(vcp);
	while (vcp->vc_state == SMBIOD_ST_IDLE) {
		tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
		    delta, TR_CLOCK_TICK);
		/* tr == 0: signal taken. */
		if (tr == 0) {
			err = EINTR;
			break;
		}
		if (tr < 0) {
			/* timeout */
			if (vcp->vc_co.co_usecount == 1) {
				/* Let this IOD terminate. */
				smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
				/* nobody to cv_broadcast */
				break;
			}
		}
		/* tr > 0: woken by cv_signal; loop re-checks state. */
	}
	SMB_VC_UNLOCK(vcp);

	return (err);
}
1079 
/*
 * After a failed reconnect attempt, smbiod will
 * call this to make current requests error out.
 *
 * Transitions RECONNECT -> RCFAILED (briefly) -> IDLE.
 * Returns zero, EINVAL if not in RECONNECT state, or
 * EINTR if the short wait below was interrupted.
 */
int
smb_iod_vc_rcfail(struct smb_vc *vcp)
{
	clock_t tr;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	if (vcp->vc_state != SMBIOD_ST_RECONNECT)
		return (EINVAL);

	SMB_VC_LOCK(vcp);

	smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
	cv_broadcast(&vcp->vc_statechg);

	/*
	 * Short wait here for two reasons:
	 * (1) Give requests a chance to error out.
	 * (2) Prevent immediate retry.
	 */
	tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
	    SEC_TO_TICK(5), TR_CLOCK_TICK);
	if (tr == 0)
		err = EINTR;

	/* Back to IDLE so smb_iod_reconnect can try again later. */
	smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
	cv_broadcast(&vcp->vc_statechg);

	SMB_VC_UNLOCK(vcp);

	return (err);
}
1121 
/*
 * Ask the IOD to reconnect (if not already underway)
 * then wait for the reconnect to finish.
 *
 * Returns zero once the VC reaches VCACTIVE, EINTR if
 * the wait is interrupted, or ENOTCONN if the VC is
 * failed/dead (or in an unexpected state).
 */
int
smb_iod_reconnect(struct smb_vc *vcp)
{
	int err = 0, rv;

	SMB_VC_LOCK(vcp);
again:
	switch (vcp->vc_state) {

	case SMBIOD_ST_IDLE:
		/* Kick the idle IOD thread into reconnecting. */
		smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
		cv_signal(&vcp->iod_idle);
		/* FALLTHROUGH */

	case SMBIOD_ST_RECONNECT:
		/* Wait for a state change, then re-evaluate. */
		rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock);
		if (rv == 0) {
			err = EINTR;
			break;
		}
		goto again;

	case SMBIOD_ST_VCACTIVE:
		err = 0; /* success! */
		break;

	case SMBIOD_ST_RCFAILED:
	case SMBIOD_ST_DEAD:
	default:
		err = ENOTCONN;
		break;
	}

	SMB_VC_UNLOCK(vcp);
	return (err);
}
1162