xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c (revision 46b592853d0f4f11781b6b0a7533f267c6aee132)
1 /*
2  * Copyright (c) 2000-2001 Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
33  */
34 
35 /*
36  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
37  * Use is subject to license terms.
38  */
39 
40 #ifdef DEBUG
41 /* See sys/queue.h */
42 #define	QUEUEDEBUG 1
43 #endif
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/atomic.h>
48 #include <sys/proc.h>
49 #include <sys/thread.h>
50 #include <sys/file.h>
51 #include <sys/kmem.h>
52 #include <sys/unistd.h>
53 #include <sys/mount.h>
54 #include <sys/vnode.h>
55 #include <sys/types.h>
56 #include <sys/ddi.h>
57 #include <sys/sunddi.h>
58 #include <sys/stream.h>
59 #include <sys/strsun.h>
60 #include <sys/time.h>
61 #include <sys/class.h>
62 #include <sys/disp.h>
63 #include <sys/cmn_err.h>
64 #include <sys/zone.h>
65 #include <sys/sdt.h>
66 
67 #ifdef APPLE
68 #include <sys/smb_apple.h>
69 #else
70 #include <netsmb/smb_osdep.h>
71 #endif
72 
73 #include <netsmb/smb.h>
74 #include <netsmb/smb_conn.h>
75 #include <netsmb/smb_rq.h>
76 #include <netsmb/smb_subr.h>
77 #include <netsmb/smb_tran.h>
78 #include <netsmb/smb_trantcp.h>
79 
80 int smb_iod_send_echo(smb_vc_t *);
81 
82 /*
83  * This is set/cleared when smbfs loads/unloads
84  * No locks should be necessary, because smbfs
85  * can't unload until all the mounts are gone.
86  */
87 static smb_fscb_t *fscb;
/*
 * Register (or clear, when cb == NULL) the callback vector
 * smbfs uses to hear about connection state changes.
 * See the fscb comment above for why no lock is needed.
 * Always returns 0.
 */
int
smb_fscb_set(smb_fscb_t *cb)
{
	fscb = cb;
	return (0);
}
94 
95 static void
96 smb_iod_share_disconnected(smb_share_t *ssp)
97 {
98 
99 	smb_share_invalidate(ssp);
100 
101 	/* smbfs_dead() */
102 	if (fscb && fscb->fscb_disconn) {
103 		fscb->fscb_disconn(ssp);
104 	}
105 }
106 
/*
 * State changes are important and infrequent.
 * Make them easily observable via dtrace.
 */
void
smb_iod_newstate(struct smb_vc *vcp, int state)
{
	/*
	 * Just the store; callers take SMB_VC_LOCK and do their
	 * own cv_broadcast(&vcp->vc_statechg) where appropriate.
	 */
	vcp->vc_state = state;
}
116 
/*
 * Lock Held version of the next function.
 * Record the outcome of a request and wake its waiters.
 * Caller holds SMBRQ_LOCK(rqp).  Bumping sr_rpgen past
 * sr_rplast is what smb_iod_waitrq tests to detect that a
 * (new) response has arrived.
 */
static inline void
smb_iod_rqprocessed_LH(
	struct smb_rq *rqp,
	int error,
	int flags)
{
	rqp->sr_flags |= flags;
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	cv_broadcast(&rqp->sr_cond);
}
130 
/*
 * Mark a request as processed (possibly with an error):
 * take the request lock and let the _LH variant record the
 * result and wake any thread blocked in smb_iod_waitrq.
 */
static void
smb_iod_rqprocessed(
	struct smb_rq *rqp,
	int error,
	int flags)
{

	SMBRQ_LOCK(rqp);
	smb_iod_rqprocessed_LH(rqp, error, flags);
	SMBRQ_UNLOCK(rqp);
}
142 
/*
 * Fail all outstanding requests on this VC with ENOTCONN
 * and mark them SMBR_RESTART.  Called when the connection
 * is going away (see smb_iod_vc_work on IOD exit).
 */
static void
smb_iod_invrq(struct smb_vc *vcp)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 * Reader lock suffices: we only traverse the list, and
	 * smb_iod_rqprocessed takes the per-request lock.
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
	}
	rw_exit(&vcp->iod_rqlock);
}
157 
/*
 * Called by smb_vc_rele, smb_vc_kill, and by the driver
 * close entry point if the IOD closes its dev handle.
 *
 * Forcibly kill the connection and IOD.
 * Always returns 0 (for caller convenience).
 */
int
smb_iod_disconnect(struct smb_vc *vcp)
{

	/*
	 * Inform everyone of the state change.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->vc_state != SMBIOD_ST_DEAD) {
		smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
		cv_broadcast(&vcp->vc_statechg);
	}
	SMB_VC_UNLOCK(vcp);

	/*
	 * Let's be safe here and avoid doing any
	 * call across the network while trying to
	 * shut things down.  If we just disconnect,
	 * the server will take care of the logoff.
	 */
	SMB_TRAN_DISCONNECT(vcp);

	/*
	 * If we have an IOD, it should immediately notice
	 * that its connection has closed.  But in case
	 * it doesn't, let's also send it a signal.
	 * (but don't shoot our own foot!)
	 * Note: the iod calls smb_iod_invrq on its way out.
	 */
	if (vcp->iod_thr != NULL &&
	    vcp->iod_thr != curthread) {
		tsignal(vcp->iod_thr, SIGKILL);
	}

	return (0);
}
200 
/*
 * Send one request.
 *
 * Called by _addrq (for internal requests)
 * and _sendall (via _addrq, _multirq, _waitrq)
 *
 * Returns 0 when the message was handed to the transport
 * (or on a non-fatal send error, leaving the request queued
 * for a later retry), and ENOTCONN when the VC is not
 * active, the retry budget is exhausted, or the transport
 * reported a fatal error.
 */
static int
smb_iod_sendrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	mblk_t *m;
	int error;

	ASSERT(vcp);
	ASSERT(SEMA_HELD(&vcp->vc_sendlock));
	ASSERT(RW_READ_HELD(&vcp->iod_rqlock));

	/*
	 * Note: Anything special for SMBR_INTERNAL here?
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}


	/*
	 * On the first send, set the MID and (maybe)
	 * the signing sequence numbers.  The increments
	 * here are serialized by vc_sendlock
	 */
	if (rqp->sr_sendcnt == 0) {

		rqp->sr_mid = vcp->vc_next_mid++;

		if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
			/*
			 * We're signing requests and verifying
			 * signatures on responses.  Set the
			 * sequence numbers of the request and
			 * response here, used in smb_rq_verify.
			 */
			rqp->sr_seqno = vcp->vc_next_seq++;
			rqp->sr_rseqno = vcp->vc_next_seq++;
		}

		/* Fill in UID, TID, MID, etc. */
		smb_rq_fillhdr(rqp);

		/*
		 * Sign the message now that we're finally done
		 * filling in the SMB header fields, etc.
		 */
		if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
			smb_rq_sign(rqp);
		}
	}
	if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */
		smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return (ENOTCONN);
	}

	/*
	 * Replaced m_copym() with Solaris copymsg() which does the same
	 * work when we want to do a M_COPYALL.
	 * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
	 *
	 * We send a copy so the original stays available in
	 * rqp->sr_rq in case this send must be retried.
	 */
	m = copymsg(rqp->sr_rq.mb_top);

#ifdef DTRACE_PROBE
	DTRACE_PROBE2(smb_iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), m);
#else
	SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
#endif
	m_dumpm(m);

	if (m != NULL) {
		error = SMB_TRAN_SEND(vcp, m);
		m = 0; /* consumed by SEND */
	} else
		error = ENOBUFS;

	rqp->sr_lerror = error;
	if (error == 0) {
		/* Sent OK; wake anyone blocked waiting for the send. */
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		if (rqp->sr_flags & SMBR_SENDWAIT)
			cv_broadcast(&rqp->sr_cond);
		SMBRQ_UNLOCK(rqp);
		return (0);
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		return (ENOTCONN);
	}
	if (error)
		SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error);

#ifdef APPLE
	/* If proc waiting on rqp was signaled... */
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR, 0);
#endif

	/*
	 * Non-fatal send error: report success so the request
	 * stays on the list and can be sent again later.
	 */
	return (0);
}
319 
320 static int
321 smb_iod_recv1(struct smb_vc *vcp, mblk_t **mpp)
322 {
323 	mblk_t *m;
324 	uchar_t *hp;
325 	int error;
326 
327 top:
328 	m = NULL;
329 	error = SMB_TRAN_RECV(vcp, &m);
330 	if (error == EAGAIN)
331 		goto top;
332 	if (error)
333 		return (error);
334 	ASSERT(m);
335 
336 	m = m_pullup(m, SMB_HDRLEN);
337 	if (m == NULL) {
338 		return (ENOSR);
339 	}
340 
341 	/*
342 	 * Check the SMB header
343 	 */
344 	hp = mtod(m, uchar_t *);
345 	if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
346 		m_freem(m);
347 		return (EPROTO);
348 	}
349 
350 	*mpp = m;
351 	return (0);
352 }
353 
354 /*
355  * Process incoming packets
356  *
357  * This is the "reader" loop, run by the IOD thread
358  * while in state SMBIOD_ST_VCACTIVE.  The loop now
359  * simply blocks in the socket recv until either a
360  * message arrives, or a disconnect.
361  *
362  * Any non-zero error means the IOD should terminate.
363  */
364 int
365 smb_iod_recvall(struct smb_vc *vcp)
366 {
367 	struct smb_rq *rqp;
368 	mblk_t *m;
369 	uchar_t *hp;
370 	ushort_t mid;
371 	int error = 0;
372 	int etime_count = 0; /* for "server not responding", etc. */
373 
374 	for (;;) {
375 		/*
376 		 * Check whether someone "killed" this VC,
377 		 * or is asking the IOD to terminate.
378 		 */
379 
380 		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
381 			SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
382 			error = 0;
383 			break;
384 		}
385 
386 		if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
387 			SMBIODEBUG("SHUTDOWN set\n");
388 			/* This IOD thread will terminate. */
389 			SMB_VC_LOCK(vcp);
390 			smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
391 			cv_broadcast(&vcp->vc_statechg);
392 			SMB_VC_UNLOCK(vcp);
393 			error = EINTR;
394 			break;
395 		}
396 
397 		m = NULL;
398 		error = smb_iod_recv1(vcp, &m);
399 
400 		if (error == ETIME &&
401 		    vcp->iod_rqlist.tqh_first != NULL) {
402 			/*
403 			 * Nothing received for 15 seconds and
404 			 * we have requests in the queue.
405 			 */
406 			etime_count++;
407 
408 			/*
409 			 * Once, at 15 sec. notify callbacks
410 			 * and print the warning message.
411 			 */
412 			if (etime_count == 1) {
413 				/* Was: smb_iod_notify_down(vcp); */
414 				if (fscb && fscb->fscb_down)
415 					smb_vc_walkshares(vcp,
416 					    fscb->fscb_down);
417 				zprintf(vcp->vc_zoneid,
418 				    "SMB server %s not responding\n",
419 				    vcp->vc_srvname);
420 			}
421 
422 			/*
423 			 * At 30 sec. try sending an echo, and then
424 			 * once a minute thereafter.
425 			 */
426 			if ((etime_count & 3) == 2) {
427 				(void) smb_iod_send_echo(vcp);
428 			}
429 
430 			continue;
431 		} /* ETIME && requests in queue */
432 
433 		if (error == ETIME) {
434 			/*
435 			 * If the IOD thread holds the last reference
436 			 * to this VC, let the IOD thread terminate.
437 			 */
438 			if (vcp->vc_co.co_usecount > 1)
439 				continue;
440 			SMB_VC_LOCK(vcp);
441 			if (vcp->vc_co.co_usecount == 1) {
442 				smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
443 				SMB_VC_UNLOCK(vcp);
444 				error = 0;
445 				break;
446 			}
447 			SMB_VC_UNLOCK(vcp);
448 			continue;
449 		} /* error == ETIME */
450 
451 		if (error) {
452 			/*
453 			 * The recv. above returned some error
454 			 * we can't continue from i.e. ENOTCONN.
455 			 * It's dangerous to continue here.
456 			 * (possible infinite loop!)
457 			 *
458 			 * If we have requests enqueued, next
459 			 * state is reconnecting, else idle.
460 			 */
461 			int state;
462 			SMB_VC_LOCK(vcp);
463 			state = (vcp->iod_rqlist.tqh_first != NULL) ?
464 			    SMBIOD_ST_RECONNECT : SMBIOD_ST_IDLE;
465 			smb_iod_newstate(vcp, state);
466 			cv_broadcast(&vcp->vc_statechg);
467 			SMB_VC_UNLOCK(vcp);
468 			error = 0;
469 			break;
470 		}
471 
472 		/*
473 		 * Received something.  Yea!
474 		 */
475 		if (etime_count) {
476 			etime_count = 0;
477 
478 			zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
479 			    vcp->vc_srvname);
480 
481 			/* Was: smb_iod_notify_up(vcp); */
482 			if (fscb && fscb->fscb_up)
483 				smb_vc_walkshares(vcp, fscb->fscb_up);
484 		}
485 
486 		/*
487 		 * Have an SMB packet.  The SMB header was
488 		 * checked in smb_iod_recv1().
489 		 * Find the request...
490 		 */
491 		hp = mtod(m, uchar_t *);
492 		/*LINTED*/
493 		mid = letohs(SMB_HDRMID(hp));
494 		SMBIODEBUG("mid %04x\n", (uint_t)mid);
495 
496 		rw_enter(&vcp->iod_rqlock, RW_READER);
497 		TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
498 
499 			if (rqp->sr_mid != mid)
500 				continue;
501 
502 			DTRACE_PROBE2(smb_iod_recvrq,
503 			    (smb_rq_t *), rqp, (mblk_t *), m);
504 			m_dumpm(m);
505 
506 			SMBRQ_LOCK(rqp);
507 			if (rqp->sr_rp.md_top == NULL) {
508 				md_initm(&rqp->sr_rp, m);
509 			} else {
510 				if (rqp->sr_flags & SMBR_MULTIPACKET) {
511 					md_append_record(&rqp->sr_rp, m);
512 				} else {
513 					SMBRQ_UNLOCK(rqp);
514 					SMBSDEBUG("duplicate response %d "
515 					    "(ignored)\n", mid);
516 					break;
517 				}
518 			}
519 			smb_iod_rqprocessed_LH(rqp, 0, 0);
520 			SMBRQ_UNLOCK(rqp);
521 			break;
522 		}
523 
524 		if (rqp == NULL) {
525 			int cmd = SMB_HDRCMD(hp);
526 
527 			if (cmd != SMB_COM_ECHO)
528 				SMBSDEBUG("drop resp: mid %d, cmd %d\n",
529 				    (uint_t)mid, cmd);
530 /*			smb_printrqlist(vcp); */
531 			m_freem(m);
532 		}
533 		rw_exit(&vcp->iod_rqlock);
534 
535 	}
536 
537 	return (error);
538 }
539 
540 /*
541  * The IOD receiver thread has requests pending and
542  * has not received anything in a while.  Try to
543  * send an SMB echo request.  It's tricky to do a
544  * send from the IOD thread because we can't block.
545  *
546  * Using tmo=SMBNOREPLYWAIT in the request
547  * so smb_rq_reply will skip smb_iod_waitrq.
548  * The smb_smb_echo call uses SMBR_INTERNAL
549  * to avoid calling smb_iod_sendall().
550  */
551 int
552 smb_iod_send_echo(smb_vc_t *vcp)
553 {
554 	smb_cred_t scred;
555 	int err;
556 
557 	smb_credinit(&scred, NULL);
558 	err = smb_smb_echo(vcp, &scred, SMBNOREPLYWAIT);
559 	smb_credrele(&scred);
560 	return (err);
561 }
562 
563 /*
564  * The IOD thread is now just a "reader",
565  * so no more smb_iod_request().  Yea!
566  */
567 
/*
 * Place request in the queue, and send it now if possible.
 * Called with no locks held.
 *
 * Returns 0 on success, ENOTCONN when the VC is not active,
 * or (for IOD-internal requests) the immediate send error.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int error, save_newrq;

	ASSERT(rqp->sr_cred);

	/*
	 * State should be correct after the check in
	 * smb_rq_enqueue(), but we dropped locks...
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	/*
	 * Requests from the IOD itself are marked _INTERNAL,
	 * and get some special treatment to avoid blocking
	 * the reader thread (so we don't deadlock).
	 * The request is not yet on the queue, so we can
	 * modify its state here without locks.
	 * Only thing using this now is ECHO.
	 */
	rqp->sr_owner = curthread;
	if (rqp->sr_owner == vcp->iod_thr) {
		rqp->sr_flags |= SMBR_INTERNAL;

		/*
		 * This is a request from the IOD thread.
		 * Always send directly from this thread.
		 * Note lock order: iod_rqlist, vc_sendlock
		 */
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link);
		rw_downgrade(&vcp->iod_rqlock);

		/*
		 * Note: iod_sendrq expects vc_sendlock,
		 * so take that here, but carefully:
		 * Never block the IOD thread here.
		 */
		if (sema_tryp(&vcp->vc_sendlock) == 0) {
			SMBIODEBUG("sendlock busy\n");
			error = EAGAIN;
		} else {
			/* Have vc_sendlock */
			error = smb_iod_sendrq(rqp);
			sema_v(&vcp->vc_sendlock);
		}

		rw_exit(&vcp->iod_rqlock);

		/*
		 * In the non-error case, _removerq
		 * is done by either smb_rq_reply
		 * or smb_iod_waitrq.
		 */
		if (error)
			smb_iod_removerq(rqp);

		return (error);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just put on the list.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}
657 
658 /*
659  * Mark an SMBR_MULTIPACKET request as
660  * needing another send.  Similar to the
661  * "normal" part of smb_iod_addrq.
662  */
663 int
664 smb_iod_multirq(struct smb_rq *rqp)
665 {
666 	struct smb_vc *vcp = rqp->sr_vc;
667 	int save_newrq;
668 
669 	ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);
670 
671 	if (rqp->sr_flags & SMBR_INTERNAL)
672 		return (EINVAL);
673 
674 	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
675 		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
676 		return (ENOTCONN);
677 	}
678 
679 	rw_enter(&vcp->iod_rqlock, RW_WRITER);
680 
681 	/* Already on iod_rqlist, just reset state. */
682 	rqp->sr_state = SMBRQ_NOTSENT;
683 
684 	/* iod_rqlock/WRITER protects iod_newrq */
685 	save_newrq = vcp->iod_newrq;
686 	vcp->iod_newrq++;
687 
688 	rw_exit(&vcp->iod_rqlock);
689 
690 	/*
691 	 * Now send any requests that need to be sent,
692 	 * including the one we just marked NOTSENT.
693 	 * Only the thread that found iod_newrq==0
694 	 * needs to run the send loop.
695 	 */
696 	if (save_newrq == 0)
697 		smb_iod_sendall(vcp);
698 
699 	return (0);
700 }
701 
702 
/*
 * Remove a request from the VC's request list.
 * Always returns 0; the value exists for caller convenience.
 */
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;

	rw_enter(&vcp->iod_rqlock, RW_WRITER);
#ifdef QUEUEDEBUG
	/*
	 * Make sure we have not already removed it.
	 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
	 * XXX: Don't like the constant 1 here...
	 */
	ASSERT(rqp->sr_link.tqe_next != (void *)1L);
#endif
	TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
	rw_exit(&vcp->iod_rqlock);

	return (0);
}
722 
723 
724 
/*
 * Wait for a request to complete.
 *
 * For normal requests, we need to deal with
 * ioc_muxcnt dropping below vc_maxmux by
 * making arrangements to send more...
 *
 * Returns EAGAIN for internal requests (never waited on
 * here), EINTR if interrupted by a signal, ETIME when the
 * full sr_timo timeout expires, otherwise the request's
 * recorded sr_lerror (zero on success).
 */
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	clock_t tr, tmo1, tmo2;
	int error, rc;

	if (rqp->sr_flags & SMBR_INTERNAL) {
		ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
		smb_iod_removerq(rqp);
		return (EAGAIN);
	}

	/*
	 * Make sure this is NOT the IOD thread,
	 * or the wait below will stop the reader.
	 */
	ASSERT(curthread != vcp->iod_thr);

	SMBRQ_LOCK(rqp);

	/*
	 * First, wait for the request to be sent.  Normally the send
	 * has already happened by the time we get here.  However, if
	 * we have more than maxmux entries in the request list, our
	 * request may not be sent until other requests complete.
	 * The wait in this case is due to local I/O demands, so
	 * we don't want the server response timeout to apply.
	 *
	 * If a request is allowed to interrupt this wait, then the
	 * request is cancelled and never sent OTW.  Some kinds of
	 * requests should never be cancelled (i.e. close) and those
	 * are marked SMBR_NOINTR_SEND so they either go eventually,
	 * or a connection close will terminate them with ENOTCONN.
	 */
	while (rqp->sr_state == SMBRQ_NOTSENT) {
		rqp->sr_flags |= SMBR_SENDWAIT;
		if (rqp->sr_flags & SMBR_NOINTR_SEND) {
			cv_wait(&rqp->sr_cond, &rqp->sr_lock);
			rc = 1;
		} else
			rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock);
		rqp->sr_flags &= ~SMBR_SENDWAIT;
		/* cv_wait_sig returns 0 when interrupted by a signal. */
		if (rc == 0) {
			SMBIODEBUG("EINTR in sendwait, rqp=%p\n", rqp);
			error = EINTR;
			goto out;
		}
	}

	/*
	 * The request has been sent.  Now wait for the response,
	 * with the timeout specified for this request.
	 * Compute all the deadlines now, so we effectively
	 * start the timer(s) after the request is sent.
	 */
	if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
		tmo1 = lbolt + SEC_TO_TICK(smb_timo_notice);
	else
		tmo1 = 0;
	tmo2 = lbolt + SEC_TO_TICK(rqp->sr_timo);

	/*
	 * As above, we don't want to allow interrupt for some
	 * requests like open, because we could miss a succesful
	 * response and therefore "leak" a FID.  Such requests
	 * are marked SMBR_NOINTR_RECV to prevent that.
	 *
	 * If "slow server" warnings are enabled, wait first
	 * for the "notice" timeout, and warn if expired.
	 * (sr_rpgen == sr_rplast means no new response yet;
	 * see smb_iod_rqprocessed_LH.)
	 */
	if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1);
		/* tr == 0: signal; tr < 0: timed out; else wakeup. */
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq1,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (still waiting).\n", vcp->vc_srvname,
			    rqp->sr_mid, smb_timo_notice);
#endif
		}
	}

	/*
	 * Keep waiting until tmo2 is expired.
	 */
	while (rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		/* tr == 0: signal; tr < 0: timed out; else wakeup. */
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq2,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (giving up).\n", vcp->vc_srvname,
			    rqp->sr_mid, rqp->sr_timo);
#endif
			error = ETIME;
			goto out;
		}
		/* got wakeup */
	}
	error = rqp->sr_lerror;
	rqp->sr_rplast++;

out:
	SMBRQ_UNLOCK(rqp);

	/*
	 * MULTIPACKET request must stay in the list.
	 * They may need additional responses.
	 */
	if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
		smb_iod_removerq(rqp);

	/*
	 * Some request has been completed.
	 * If we reached the mux limit,
	 * re-run the send loop...
	 */
	if (vcp->iod_muxfull)
		smb_iod_sendall(vcp);

	return (error);
}
883 
884 /*
885  * Shutdown all outstanding I/O requests on the specified share with
886  * ENXIO; used when unmounting a share.  (There shouldn't be any for a
887  * non-forced unmount; if this is a forced unmount, we have to shutdown
888  * the requests as part of the unmount process.)
889  */
890 void
891 smb_iod_shutdown_share(struct smb_share *ssp)
892 {
893 	struct smb_vc *vcp = SSTOVC(ssp);
894 	struct smb_rq *rqp;
895 
896 	/*
897 	 * Loop through the list of requests and shutdown the ones
898 	 * that are for the specified share.
899 	 */
900 	rw_enter(&vcp->iod_rqlock, RW_READER);
901 	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
902 		if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
903 			smb_iod_rqprocessed(rqp, EIO, 0);
904 	}
905 	rw_exit(&vcp->iod_rqlock);
906 }
907 
/*
 * Send all requests that need sending.
 * Called from _addrq, _multirq, _waitrq
 * May block in SMB_TRAN_SEND; must NOT be
 * called from the IOD thread itself.
 */
void
smb_iod_sendall(smb_vc_t *vcp)
{
	struct smb_rq *rqp;
	int error, save_newrq, muxcnt;

	/*
	 * Clear "newrq" to make sure threads adding
	 * new requests will run this function again.
	 */
	rw_enter(&vcp->iod_rqlock, RW_WRITER);
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq = 0;

	/*
	 * We only read iod_rqlist, so downgrade rwlock.
	 * This allows the IOD to handle responses while
	 * some requesting thread may be blocked in send.
	 */
	rw_downgrade(&vcp->iod_rqlock);

	/* Expect to find about this many requests. */
	SMBIODEBUG("top, save_newrq=%d\n", save_newrq);

	/*
	 * Serialize to prevent multiple senders.
	 * Note lock order: iod_rqlock, vc_sendlock
	 */
	sema_p(&vcp->vc_sendlock);

	/*
	 * Walk the list of requests and send when possible.
	 * We avoid having more than vc_maxmux requests
	 * outstanding to the server by traversing only
	 * vc_maxmux entries into this list.  Simple!
	 */
	ASSERT(vcp->vc_maxmux > 0);
	error = muxcnt = 0;
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			error = ENOTCONN; /* stop everything! */
			break;
		}

		if (rqp->sr_state == SMBRQ_NOTSENT) {
			error = smb_iod_sendrq(rqp);
			if (error)
				break;
		}

		/* Stop at the mux limit; see comment above. */
		if (++muxcnt == vcp->vc_maxmux) {
			SMBIODEBUG("muxcnt == vc_maxmux\n");
			break;
		}

	}

	/*
	 * If we have vc_maxmux requests outstanding,
	 * arrange for _waitrq to call _sendall as
	 * requests are completed.
	 */
	vcp->iod_muxfull =
	    (muxcnt < vcp->vc_maxmux) ? 0 : 1;

	sema_v(&vcp->vc_sendlock);
	rw_exit(&vcp->iod_rqlock);
}
981 
/*
 * Main work for the IOD thread while connected:
 * borrow the transport file from the user-level smbiod,
 * mark the VC active, run the reader loop, and on the way
 * out notify shares of the disconnect and fail any
 * outstanding requests.  Returns the reader-loop error,
 * or EBADF/loan-fp errors on setup failure.
 */
int
smb_iod_vc_work(struct smb_vc *vcp, cred_t *cr)
{
	struct file *fp = NULL;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	/*
	 * Get the network transport file pointer,
	 * and "loan" it to our transport module.
	 */
	if ((fp = getf(vcp->vc_tran_fd)) == NULL) {
		err = EBADF;
		goto out;
	}
	if ((err = SMB_TRAN_LOAN_FP(vcp, fp, cr)) != 0)
		goto out;

	/*
	 * In case of reconnect, tell any enqueued requests
	 * they can GO!
	 */
	SMB_VC_LOCK(vcp);
	vcp->vc_genid++;	/* possibly new connection */
	smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
	cv_broadcast(&vcp->vc_statechg);
	SMB_VC_UNLOCK(vcp);

	/*
	 * The above cv_broadcast should be sufficient to
	 * get requests going again.
	 *
	 * If we have a callback function, run it.
	 * Was: smb_iod_notify_connected()
	 */
	if (fscb && fscb->fscb_connect)
		smb_vc_walkshares(vcp, fscb->fscb_connect);

	/*
	 * Run the "reader" loop.
	 */
	err = smb_iod_recvall(vcp);

	/*
	 * The reader loop returned, so we must have a
	 * new state.  (disconnected or reconnecting)
	 *
	 * Notify shares of the disconnect.
	 * Was: smb_iod_notify_disconnect()
	 */
	smb_vc_walkshares(vcp, smb_iod_share_disconnected);

	/*
	 * The reader loop function returns only when
	 * there's been an error on the connection, or
	 * this VC has no more references.  It also
	 * updates the state before it returns.
	 *
	 * Tell any requests to give up or restart.
	 */
	smb_iod_invrq(vcp);

out:
	/* Recall the file descriptor loan. */
	(void) SMB_TRAN_LOAN_FP(vcp, NULL, cr);
	if (fp != NULL) {
		releasef(vcp->vc_tran_fd);
	}

	return (err);
}
1058 
/*
 * Wait around for someone to ask to use this VC.
 * If the VC has only the IOD reference, then
 * wait only a minute or so, then drop it.
 *
 * Returns 0 when the VC leaves the IDLE state (or is
 * allowed to die), EINTR if the wait was signaled.
 */
int
smb_iod_vc_idle(struct smb_vc *vcp)
{
	clock_t tr, tmo;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	SMB_VC_LOCK(vcp);
	while (vcp->vc_state == SMBIOD_ST_IDLE) {
		/* Re-check the state every 15 seconds. */
		tmo = lbolt + SEC_TO_TICK(15);
		tr = cv_timedwait_sig(&vcp->iod_idle, &vcp->vc_lock, tmo);
		/* tr == 0: signal; tr < 0: timed out; else wakeup. */
		if (tr == 0) {
			err = EINTR;
			break;
		}
		if (tr < 0) {
			/* timeout */
			if (vcp->vc_co.co_usecount == 1) {
				/* Let this IOD terminate. */
				smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
				/* nobody to cv_broadcast */
				break;
			}
		}
	}
	SMB_VC_UNLOCK(vcp);

	return (err);
}
1098 
/*
 * After a failed reconnect attempt, smbiod will
 * call this to make current requests error out.
 *
 * Returns EINVAL if not in the RECONNECT state,
 * EINTR if the short delay was signaled, else 0.
 * Leaves the VC in the IDLE state either way.
 */
int
smb_iod_vc_rcfail(struct smb_vc *vcp)
{
	clock_t tr, tmo;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	if (vcp->vc_state != SMBIOD_ST_RECONNECT)
		return (EINVAL);

	SMB_VC_LOCK(vcp);

	smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
	cv_broadcast(&vcp->vc_statechg);

	/*
	 * Short wait here for two reasons:
	 * (1) Give requests a chance to error out.
	 * (2) Prevent immediate retry.
	 */
	tmo = lbolt + SEC_TO_TICK(5);
	tr = cv_timedwait_sig(&vcp->iod_idle, &vcp->vc_lock, tmo);
	if (tr == 0)
		err = EINTR;

	smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
	cv_broadcast(&vcp->vc_statechg);

	SMB_VC_UNLOCK(vcp);

	return (err);
}
1140 
/*
 * Ask the IOD to reconnect (if not already underway)
 * then wait for the reconnect to finish.
 *
 * Returns 0 once the VC is active again, EINTR if the wait
 * was signaled, ENOTCONN if the reconnect failed or the VC
 * is dead.
 */
int
smb_iod_reconnect(struct smb_vc *vcp)
{
	int err = 0, rv;

	SMB_VC_LOCK(vcp);
again:
	switch (vcp->vc_state) {

	case SMBIOD_ST_IDLE:
		/* Wake the idle IOD so it starts a reconnect. */
		smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
		cv_signal(&vcp->iod_idle);
		/* FALLTHROUGH */

	case SMBIOD_ST_RECONNECT:
		/* Wait for a state change, then re-evaluate. */
		rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock);
		if (rv == 0) {
			err = EINTR;
			break;
		}
		goto again;

	case SMBIOD_ST_VCACTIVE:
		err = 0; /* success! */
		break;

	case SMBIOD_ST_RCFAILED:
	case SMBIOD_ST_DEAD:
	default:
		err = ENOTCONN;
		break;
	}

	SMB_VC_UNLOCK(vcp);
	return (err);
}
1181