/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifdef DEBUG
/* See sys/queue.h */
#define	QUEUEDEBUG 1
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/unistd.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/time.h>
#include <sys/class.h>
#include <sys/disp.h>
#include <sys/cmn_err.h>
#include <sys/zone.h>
#include <sys/sdt.h>

#include <netsmb/smb_osdep.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>

int smb_iod_send_echo(smb_vc_t *);

/*
 * This is set/cleared when smbfs loads/unloads
 * No locks should be necessary, because smbfs
 * can't unload until all the mounts are gone.
 */
static smb_fscb_t *fscb;
void
smb_fscb_set(smb_fscb_t *cb)
{
	fscb = cb;
}
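
/*
 * Illustrative only (hypothetical callback names; fscb_disconn,
 * fscb_down, and fscb_up are the members actually used below):
 * a sketch of how smbfs might use the hook above, registering
 * its callback vector at module load and clearing it at unload.
 *
 *	static smb_fscb_t smbfs_fscb = {
 *		.fscb_disconn	= smbfs_dead,
 *		.fscb_down	= smbfs_down,
 *		.fscb_up	= smbfs_up,
 *	};
 *
 *	smb_fscb_set(&smbfs_fscb);	(in _init)
 *	smb_fscb_set(NULL);		(in _fini)
 */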

static void
smb_iod_share_disconnected(smb_share_t *ssp)
{

	smb_share_invalidate(ssp);

	/* smbfs_dead() */
	if (fscb && fscb->fscb_disconn) {
		fscb->fscb_disconn(ssp);
	}
}

/*
 * State changes are important and infrequent.
 * Make them easily observable via dtrace.
 */
void
smb_iod_newstate(struct smb_vc *vcp, int state)
{
	vcp->vc_state = state;
}
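
/*
 * For example (a sketch, assuming DTrace is available), state
 * transitions can be watched with an fbt probe on this function,
 * where arg1 is the new state:
 *
 *	dtrace -n 'fbt::smb_iod_newstate:entry { trace(arg1); }'
 */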

/* Lock Held version of the next function. */
static inline void
smb_iod_rqprocessed_LH(
	struct smb_rq *rqp,
	int error,
	int flags)
{
	rqp->sr_flags |= flags;
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	cv_broadcast(&rqp->sr_cond);
}

static void
smb_iod_rqprocessed(
	struct smb_rq *rqp,
	int error,
	int flags)
{

	SMBRQ_LOCK(rqp);
	smb_iod_rqprocessed_LH(rqp, error, flags);
	SMBRQ_UNLOCK(rqp);
}

static void
smb_iod_invrq(struct smb_vc *vcp)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
	}
	rw_exit(&vcp->iod_rqlock);
}

/*
 * Called by smb_vc_rele, smb_vc_kill, and by the driver
 * close entry point if the IOD closes its dev handle.
 *
 * Forcibly kill the connection and IOD.
 */
void
smb_iod_disconnect(struct smb_vc *vcp)
{

	/*
	 * Inform everyone of the state change.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->vc_state != SMBIOD_ST_DEAD) {
		smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
		cv_broadcast(&vcp->vc_statechg);
	}
	SMB_VC_UNLOCK(vcp);

	/*
	 * Let's be safe here and avoid doing any
	 * call across the network while trying to
	 * shut things down.  If we just disconnect,
	 * the server will take care of the logoff.
	 */
	SMB_TRAN_DISCONNECT(vcp);

	/*
	 * If we have an IOD, it should immediately notice
	 * that its connection has closed.  But in case
	 * it doesn't, let's also send it a signal.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->iod_thr != NULL &&
	    vcp->iod_thr != curthread) {
		tsignal(vcp->iod_thr, SIGKILL);
	}
	SMB_VC_UNLOCK(vcp);
}

/*
 * Send one request.
 *
 * Called by _addrq (for internal requests)
 * and _sendall (via _addrq, _multirq, _waitrq)
 */
static int
smb_iod_sendrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	mblk_t *m;
	int error;

	ASSERT(vcp);
	ASSERT(SEMA_HELD(&vcp->vc_sendlock));
	ASSERT(RW_READ_HELD(&vcp->iod_rqlock));

	/*
	 * Note: Anything special for SMBR_INTERNAL here?
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}


	/*
	 * On the first send, set the MID and (maybe)
	 * the signing sequence numbers.  The increments
	 * here are serialized by vc_sendlock
	 */
	if (rqp->sr_sendcnt == 0) {

		rqp->sr_mid = vcp->vc_next_mid++;

		if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
			/*
			 * We're signing requests and verifying
			 * signatures on responses.  Set the
			 * sequence numbers of the request and
			 * response here, used in smb_rq_verify.
			 */
			rqp->sr_seqno = vcp->vc_next_seq++;
			rqp->sr_rseqno = vcp->vc_next_seq++;
		}

		/* Fill in UID, TID, MID, etc. */
		smb_rq_fillhdr(rqp);

		/*
		 * Sign the message now that we're finally done
		 * filling in the SMB header fields, etc.
		 */
		if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
			smb_rq_sign(rqp);
		}
	}
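	/*
	 * Each pass through here for a still-unanswered request
	 * corresponds to roughly one SMBSBTIMO-second interval,
	 * so 60/SMBSBTIMO attempts spans about one minute.
	 * (Assumes SMBSBTIMO is the sockbuf timeout in seconds,
	 * as in the BSD netsmb code this derives from.)
	 */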
	if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */
		smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return (ENOTCONN);
	}

	/*
	 * Replaced m_copym() with Solaris copymsg() which does the same
	 * work when we want to do a M_COPYALL.
	 * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
	 */
	m = copymsg(rqp->sr_rq.mb_top);

#ifdef DTRACE_PROBE
	DTRACE_PROBE2(smb_iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), m);
#else
	SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
#endif
	m_dumpm(m);

	if (m != NULL) {
		error = SMB_TRAN_SEND(vcp, m);
		m = 0; /* consumed by SEND */
	} else
		error = ENOBUFS;

	rqp->sr_lerror = error;
	if (error == 0) {
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		if (rqp->sr_flags & SMBR_SENDWAIT)
			cv_broadcast(&rqp->sr_cond);
		SMBRQ_UNLOCK(rqp);
		return (0);
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		return (ENOTCONN);
	}
	if (error)
		SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error);

#ifdef APPLE
	/* If proc waiting on rqp was signaled... */
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR, 0);
#endif

	return (0);
}

static int
smb_iod_recv1(struct smb_vc *vcp, mblk_t **mpp)
{
	mblk_t *m;
	uchar_t *hp;
	int error;

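	/*
	 * Loop in the transport receive until it returns
	 * either a message or a hard error; EAGAIN just
	 * means try again.
	 */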
top:
	m = NULL;
	error = SMB_TRAN_RECV(vcp, &m);
	if (error == EAGAIN)
		goto top;
	if (error)
		return (error);
	ASSERT(m);

	m = m_pullup(m, SMB_HDRLEN);
	if (m == NULL) {
		return (ENOSR);
	}

	/*
	 * Check the SMB header
	 */
	hp = mtod(m, uchar_t *);
	if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
		m_freem(m);
		return (EPROTO);
	}

	*mpp = m;
	return (0);
}

/*
 * Process incoming packets
 *
 * This is the "reader" loop, run by the IOD thread
 * while in state SMBIOD_ST_VCACTIVE.  The loop now
 * simply blocks in the socket recv until either a
 * message arrives, or a disconnect.
 *
 * Any non-zero error means the IOD should terminate.
 */
int
smb_iod_recvall(struct smb_vc *vcp)
{
	struct smb_rq *rqp;
	mblk_t *m;
	uchar_t *hp;
	ushort_t mid;
	int error = 0;
	int etime_count = 0; /* for "server not responding", etc. */

	for (;;) {
		/*
		 * Check whether someone "killed" this VC,
		 * or is asking the IOD to terminate.
		 */

		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
			error = 0;
			break;
		}

		if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
			SMBIODEBUG("SHUTDOWN set\n");
			/* This IOD thread will terminate. */
			SMB_VC_LOCK(vcp);
			smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			error = EINTR;
			break;
		}

		m = NULL;
		error = smb_iod_recv1(vcp, &m);

		if (error == ETIME &&
		    vcp->iod_rqlist.tqh_first != NULL) {
			/*
			 * Nothing received for 15 seconds and
			 * we have requests in the queue.
			 */
			etime_count++;

			/*
			 * Once, at 15 sec. notify callbacks
			 * and print the warning message.
			 */
			if (etime_count == 1) {
				/* Was: smb_iod_notify_down(vcp); */
				if (fscb && fscb->fscb_down)
					smb_vc_walkshares(vcp,
					    fscb->fscb_down);
				zprintf(vcp->vc_zoneid,
				    "SMB server %s not responding\n",
				    vcp->vc_srvname);
			}

			/*
			 * At 30 sec. try sending an echo, and then
			 * once a minute thereafter.
			 */
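			/*
			 * (etime_count advances once per 15-sec.
			 * recv timeout, so (etime_count & 3) == 2
			 * hits at counts 2, 6, 10, ... which is
			 * 30 sec., then every 60 sec. after.)
			 */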
			if ((etime_count & 3) == 2) {
				(void) smb_iod_send_echo(vcp);
			}

			continue;
		} /* ETIME && requests in queue */

		if (error == ETIME) {
			/*
			 * If the IOD thread holds the last reference
			 * to this VC, let the IOD thread terminate.
			 */
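			/*
			 * (Quick unlocked test first, to avoid
			 * taking the lock on every timeout; the
			 * hold count is re-checked under
			 * SMB_VC_LOCK before acting on it.)
			 */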
			if (vcp->vc_co.co_usecount > 1)
				continue;
			SMB_VC_LOCK(vcp);
			if (vcp->vc_co.co_usecount == 1) {
				smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
				SMB_VC_UNLOCK(vcp);
				error = 0;
				break;
			}
			SMB_VC_UNLOCK(vcp);
			continue;
		} /* error == ETIME */

		if (error) {
			/*
			 * The recv above returned an error we
			 * can't continue from, e.g. ENOTCONN.
			 * It's dangerous to continue here.
			 * (possible infinite loop!)
			 *
			 * If we have requests enqueued, next
			 * state is reconnecting, else idle.
			 */
			int state;
			SMB_VC_LOCK(vcp);
			state = (vcp->iod_rqlist.tqh_first != NULL) ?
			    SMBIOD_ST_RECONNECT : SMBIOD_ST_IDLE;
			smb_iod_newstate(vcp, state);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			error = 0;
			break;
		}

		/*
		 * Received something.  Yea!
		 */
		if (etime_count) {
			etime_count = 0;

			zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
			    vcp->vc_srvname);

			/* Was: smb_iod_notify_up(vcp); */
			if (fscb && fscb->fscb_up)
				smb_vc_walkshares(vcp, fscb->fscb_up);
		}

		/*
		 * Have an SMB packet.  The SMB header was
		 * checked in smb_iod_recv1().
		 * Find the request...
		 */
		hp = mtod(m, uchar_t *);
		/*LINTED*/
		mid = letohs(SMB_HDRMID(hp));
		SMBIODEBUG("mid %04x\n", (uint_t)mid);

		rw_enter(&vcp->iod_rqlock, RW_READER);
		TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

			if (rqp->sr_mid != mid)
				continue;

			DTRACE_PROBE2(smb_iod_recvrq,
			    (smb_rq_t *), rqp, (mblk_t *), m);
			m_dumpm(m);

			SMBRQ_LOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_UNLOCK(rqp);
					SMBSDEBUG("duplicate response %d "
					    "(ignored)\n", mid);
					break;
				}
			}
			smb_iod_rqprocessed_LH(rqp, 0, 0);
			SMBRQ_UNLOCK(rqp);
			break;
		}

		if (rqp == NULL) {
			int cmd = SMB_HDRCMD(hp);

			if (cmd != SMB_COM_ECHO)
				SMBSDEBUG("drop resp: mid %d, cmd %d\n",
				    (uint_t)mid, cmd);
/*			smb_printrqlist(vcp); */
			m_freem(m);
		}
		rw_exit(&vcp->iod_rqlock);

	}

	return (error);
}

/*
 * The IOD receiver thread has requests pending and
 * has not received anything in a while.  Try to
 * send an SMB echo request.  It's tricky to do a
 * send from the IOD thread because we can't block.
 *
 * Using tmo=SMBNOREPLYWAIT in the request
 * so smb_rq_reply will skip smb_iod_waitrq.
 * The smb_smb_echo call uses SMBR_INTERNAL
 * to avoid calling smb_iod_sendall().
 */
int
smb_iod_send_echo(smb_vc_t *vcp)
{
	smb_cred_t scred;
	int err;

	smb_credinit(&scred, NULL);
	err = smb_smb_echo(vcp, &scred, SMBNOREPLYWAIT);
	smb_credrele(&scred);
	return (err);
}

/*
 * The IOD thread is now just a "reader",
 * so no more smb_iod_request().  Yea!
 */

/*
 * Place request in the queue, and send it now if possible.
 * Called with no locks held.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int error, save_newrq;

	ASSERT(rqp->sr_cred);

	/*
	 * State should be correct after the check in
	 * smb_rq_enqueue(), but we dropped locks...
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	/*
	 * Requests from the IOD itself are marked _INTERNAL,
	 * and get some special treatment to avoid blocking
	 * the reader thread (so we don't deadlock).
	 * The request is not yet on the queue, so we can
	 * modify its state here without locks.
	 * Only thing using this now is ECHO.
	 */
	rqp->sr_owner = curthread;
	if (rqp->sr_owner == vcp->iod_thr) {
		rqp->sr_flags |= SMBR_INTERNAL;

		/*
		 * This is a request from the IOD thread.
		 * Always send directly from this thread.
		 * Note lock order: iod_rqlist, vc_sendlock
		 */
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link);
		rw_downgrade(&vcp->iod_rqlock);
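		/*
		 * (WRITER was needed only for the list insert.
		 * READER is all smb_iod_sendrq requires -- it
		 * ASSERTs RW_READ_HELD -- and downgrading lets
		 * response matching search the list meanwhile.)
		 */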

		/*
		 * Note: iod_sendrq expects vc_sendlock,
		 * so take that here, but carefully:
		 * Never block the IOD thread here.
		 */
		if (sema_tryp(&vcp->vc_sendlock) == 0) {
			SMBIODEBUG("sendlock busy\n");
			error = EAGAIN;
		} else {
			/* Have vc_sendlock */
			error = smb_iod_sendrq(rqp);
			sema_v(&vcp->vc_sendlock);
		}

		rw_exit(&vcp->iod_rqlock);

		/*
		 * In the non-error case, _removerq
		 * is done by either smb_rq_reply
		 * or smb_iod_waitrq.
		 */
		if (error)
			smb_iod_removerq(rqp);

		return (error);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just put on the list.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
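	/*
	 * (No send is missed by this handshake: _sendall
	 * zeroes iod_newrq under the WRITER lock before it
	 * walks the list, so any request added after that
	 * point makes its caller see save_newrq == 0 and
	 * run the send loop again itself.)
	 */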
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}

/*
 * Mark an SMBR_MULTIPACKET request as
 * needing another send.  Similar to the
 * "normal" part of smb_iod_addrq.
 */
int
smb_iod_multirq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int save_newrq;

	ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);

	if (rqp->sr_flags & SMBR_INTERNAL)
		return (EINVAL);

	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	/* Already on iod_rqlist, just reset state. */
	rqp->sr_state = SMBRQ_NOTSENT;

	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just marked NOTSENT.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}


void
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;

	rw_enter(&vcp->iod_rqlock, RW_WRITER);
#ifdef QUEUEDEBUG
	/*
	 * Make sure we have not already removed it.
	 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
	 * XXX: Don't like the constant 1 here...
	 */
	ASSERT(rqp->sr_link.tqe_next != (void *)1L);
#endif
	TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
	rw_exit(&vcp->iod_rqlock);
}



/*
 * Wait for a request to complete.
 *
 * For normal requests, we need to deal with
 * ioc_muxcnt dropping below vc_maxmux by
 * making arrangements to send more...
 */
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	clock_t tr, tmo1, tmo2;
	int error, rc;

	if (rqp->sr_flags & SMBR_INTERNAL) {
		ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
		smb_iod_removerq(rqp);
		return (EAGAIN);
	}

	/*
	 * Make sure this is NOT the IOD thread,
	 * or the wait below will stop the reader.
	 */
	ASSERT(curthread != vcp->iod_thr);

	SMBRQ_LOCK(rqp);

	/*
	 * First, wait for the request to be sent.  Normally the send
	 * has already happened by the time we get here.  However, if
	 * we have more than maxmux entries in the request list, our
	 * request may not be sent until other requests complete.
	 * The wait in this case is due to local I/O demands, so
	 * we don't want the server response timeout to apply.
	 *
	 * If a request is allowed to interrupt this wait, then the
	 * request is cancelled and never sent OTW.  Some kinds of
	 * requests should never be cancelled (e.g. close) and those
	 * are marked SMBR_NOINTR_SEND so they either go eventually,
	 * or a connection close will terminate them with ENOTCONN.
	 */
	while (rqp->sr_state == SMBRQ_NOTSENT) {
		rqp->sr_flags |= SMBR_SENDWAIT;
		if (rqp->sr_flags & SMBR_NOINTR_SEND) {
			cv_wait(&rqp->sr_cond, &rqp->sr_lock);
			rc = 1;
		} else
			rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock);
		rqp->sr_flags &= ~SMBR_SENDWAIT;
		if (rc == 0) {
			SMBIODEBUG("EINTR in sendwait, rqp=%p\n", rqp);
			error = EINTR;
			goto out;
		}
	}

	/*
	 * The request has been sent.  Now wait for the response,
	 * with the timeout specified for this request.
	 * Compute all the deadlines now, so we effectively
	 * start the timer(s) after the request is sent.
	 */
	if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
		tmo1 = SEC_TO_TICK(smb_timo_notice);
	else
		tmo1 = 0;
	tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);
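
	/*
	 * (Note: tmo1 is a relative interval, as expected by
	 * the cv_reltimedwait calls below, while tmo2 is an
	 * absolute deadline in lbolt ticks, as expected by
	 * cv_timedwait.)
	 */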

	/*
	 * As above, we don't want to allow interrupt for some
	 * requests like open, because we could miss a successful
	 * response and therefore "leak" a FID.  Such requests
	 * are marked SMBR_NOINTR_RECV to prevent that.
	 *
	 * If "slow server" warnings are enabled, wait first
	 * for the "notice" timeout, and warn if expired.
	 */
	if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_reltimedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
		else
			tr = cv_reltimedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq1,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (still waiting).\n", vcp->vc_srvname,
			    rqp->sr_mid, smb_timo_notice);
#endif
		}
	}

	/*
	 * Keep waiting until tmo2 is expired.
	 */
	while (rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq2,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (giving up).\n", vcp->vc_srvname,
			    rqp->sr_mid, rqp->sr_timo);
#endif
			error = ETIME;
			goto out;
		}
		/* got wakeup */
	}
	error = rqp->sr_lerror;
	rqp->sr_rplast++;

out:
	SMBRQ_UNLOCK(rqp);

	/*
	 * MULTIPACKET requests must stay on the list;
	 * they may need additional responses.
	 */
	if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
		smb_iod_removerq(rqp);

	/*
	 * Some request has been completed.
	 * If we reached the mux limit,
	 * re-run the send loop...
	 */
	if (vcp->iod_muxfull)
		smb_iod_sendall(vcp);

	return (error);
}

/*
 * Shutdown all outstanding I/O requests on the specified share with
 * EIO; used when unmounting a share.  (There shouldn't be any for a
 * non-forced unmount; if this is a forced unmount, we have to shutdown
 * the requests as part of the unmount process.)
 */
void
smb_iod_shutdown_share(struct smb_share *ssp)
{
	struct smb_vc *vcp = SSTOVC(ssp);
	struct smb_rq *rqp;

	/*
	 * Loop through the list of requests and shutdown the ones
	 * that are for the specified share.
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
			smb_iod_rqprocessed(rqp, EIO, 0);
	}
	rw_exit(&vcp->iod_rqlock);
}

/*
 * Send all requests that need sending.
 * Called from _addrq, _multirq, _waitrq
 */
void
smb_iod_sendall(smb_vc_t *vcp)
{
	struct smb_rq *rqp;
	int error, muxcnt;

	/*
	 * Clear "newrq" to make sure threads adding
	 * new requests will run this function again.
	 */
	rw_enter(&vcp->iod_rqlock, RW_WRITER);
	vcp->iod_newrq = 0;

	/*
	 * We only read iod_rqlist, so downgrade rwlock.
	 * This allows the IOD to handle responses while
	 * some requesting thread may be blocked in send.
	 */
	rw_downgrade(&vcp->iod_rqlock);

	/*
	 * Serialize to prevent multiple senders.
	 * Note lock order: iod_rqlock, vc_sendlock
	 */
	sema_p(&vcp->vc_sendlock);

	/*
	 * Walk the list of requests and send when possible.
	 * We avoid having more than vc_maxmux requests
	 * outstanding to the server by traversing only
	 * vc_maxmux entries into this list.  Simple!
	 */
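	/*
	 * (Note the walk counts every entry, not just NOTSENT
	 * ones, so requests already in flight at the head of
	 * the list occupy mux slots until they complete and
	 * are removed.)
	 */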
	ASSERT(vcp->vc_maxmux > 0);
	error = muxcnt = 0;
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			error = ENOTCONN; /* stop everything! */
			break;
		}

		if (rqp->sr_state == SMBRQ_NOTSENT) {
			error = smb_iod_sendrq(rqp);
			if (error)
				break;
		}

		if (++muxcnt == vcp->vc_maxmux) {
			SMBIODEBUG("muxcnt == vc_maxmux\n");
			break;
		}

	}

	/*
	 * If we have vc_maxmux requests outstanding,
	 * arrange for _waitrq to call _sendall as
	 * requests are completed.
	 */
	vcp->iod_muxfull =
	    (muxcnt < vcp->vc_maxmux) ? 0 : 1;

	sema_v(&vcp->vc_sendlock);
	rw_exit(&vcp->iod_rqlock);
}

int
smb_iod_vc_work(struct smb_vc *vcp, cred_t *cr)
{
	struct file *fp = NULL;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	/*
	 * Get the network transport file pointer,
	 * and "loan" it to our transport module.
	 */
	if ((fp = getf(vcp->vc_tran_fd)) == NULL) {
		err = EBADF;
		goto out;
	}
	if ((err = SMB_TRAN_LOAN_FP(vcp, fp, cr)) != 0)
		goto out;

	/*
	 * In case of reconnect, tell any enqueued requests
	 * they can GO!
	 */
	SMB_VC_LOCK(vcp);
	vcp->vc_genid++;	/* possibly new connection */
	smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
	cv_broadcast(&vcp->vc_statechg);
	SMB_VC_UNLOCK(vcp);

	/*
	 * The above cv_broadcast should be sufficient to
	 * get requests going again.
	 *
	 * If we have a callback function, run it.
	 * Was: smb_iod_notify_connected()
	 */
	if (fscb && fscb->fscb_connect)
		smb_vc_walkshares(vcp, fscb->fscb_connect);

	/*
	 * Run the "reader" loop.
	 */
	err = smb_iod_recvall(vcp);

	/*
	 * The reader loop returned, so we must have a
	 * new state.  (disconnected or reconnecting)
	 *
	 * Notify shares of the disconnect.
	 * Was: smb_iod_notify_disconnect()
	 */
	smb_vc_walkshares(vcp, smb_iod_share_disconnected);

	/*
	 * The reader loop function returns only when
	 * there's been an error on the connection, or
	 * this VC has no more references.  It also
	 * updates the state before it returns.
	 *
	 * Tell any requests to give up or restart.
	 */
	smb_iod_invrq(vcp);

out:
	/* Recall the file descriptor loan. */
	(void) SMB_TRAN_LOAN_FP(vcp, NULL, cr);
	if (fp != NULL) {
		releasef(vcp->vc_tran_fd);
	}

	return (err);
}

/*
 * Wait around for someone to ask to use this VC.
 * If the VC has only the IOD reference, wait in
 * 15-sec. intervals, and drop it when a timeout
 * finds ours is still the only reference.
 */
int
smb_iod_vc_idle(struct smb_vc *vcp)
{
	clock_t tr, delta = SEC_TO_TICK(15);
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	SMB_VC_LOCK(vcp);
	while (vcp->vc_state == SMBIOD_ST_IDLE) {
		tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
		    delta, TR_CLOCK_TICK);
		if (tr == 0) {
			err = EINTR;
			break;
		}
		if (tr < 0) {
			/* timeout */
			if (vcp->vc_co.co_usecount == 1) {
				/* Let this IOD terminate. */
				smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
				/* nobody to cv_broadcast */
				break;
			}
		}
	}
	SMB_VC_UNLOCK(vcp);

	return (err);
}

/*
 * After a failed reconnect attempt, smbiod will
 * call this to make current requests error out.
 */
int
smb_iod_vc_rcfail(struct smb_vc *vcp)
{
	clock_t tr;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	if (vcp->vc_state != SMBIOD_ST_RECONNECT)
		return (EINVAL);

	SMB_VC_LOCK(vcp);

	smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
	cv_broadcast(&vcp->vc_statechg);

	/*
	 * Short wait here for two reasons:
	 * (1) Give requests a chance to error out.
	 * (2) Prevent immediate retry.
	 */
	tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
	    SEC_TO_TICK(5), TR_CLOCK_TICK);
	if (tr == 0)
		err = EINTR;

	smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
	cv_broadcast(&vcp->vc_statechg);

	SMB_VC_UNLOCK(vcp);

	return (err);
}

/*
 * Ask the IOD to reconnect (if not already underway)
 * then wait for the reconnect to finish.
 */
int
smb_iod_reconnect(struct smb_vc *vcp)
{
	int err = 0, rv;

	SMB_VC_LOCK(vcp);
again:
	switch (vcp->vc_state) {

	case SMBIOD_ST_IDLE:
		smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
		cv_signal(&vcp->iod_idle);
		/* FALLTHROUGH */

	case SMBIOD_ST_RECONNECT:
		rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock);
		if (rv == 0) {
			err = EINTR;
			break;
		}
		goto again;

	case SMBIOD_ST_VCACTIVE:
		err = 0; /* success! */
		break;

	case SMBIOD_ST_RCFAILED:
	case SMBIOD_ST_DEAD:
	default:
		err = ENOTCONN;
		break;
	}

	SMB_VC_UNLOCK(vcp);
	return (err);
}
1168