xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/netsmb/smb_conn.c (revision 4eaa471005973e11a6110b69fe990530b3b95a38)
1 /*
2  * Copyright (c) 2000-2001 Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smb_conn.c,v 1.27.166.1 2005/05/27 02:35:29 lindak Exp $
33  */
34 /*
35  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
36  * Use is subject to license terms.
37  */
38 
39 /*
40  * Connection engine.
41  */
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kmem.h>
46 #include <sys/proc.h>
47 #include <sys/lock.h>
48 #include <sys/vnode.h>
49 #include <sys/stream.h>
50 #include <sys/stropts.h>
51 #include <sys/socketvar.h>
52 #include <sys/cred.h>
53 #include <sys/cred_impl.h>
54 #include <netinet/in.h>
55 #include <inet/ip.h>
56 #include <inet/ip6.h>
57 #include <sys/cmn_err.h>
58 #include <sys/thread.h>
59 #include <sys/atomic.h>
60 #include <sys/u8_textprep.h>
61 
62 #ifdef APPLE
63 #include <sys/smb_apple.h>
64 #include <sys/smb_iconv.h>
65 #else
66 #include <netsmb/smb_osdep.h>
67 #endif
68 
69 #include <netsmb/smb.h>
70 #include <netsmb/smb_conn.h>
71 #include <netsmb/smb_subr.h>
72 #include <netsmb/smb_tran.h>
73 #include <netsmb/smb_pass.h>
74 
75 static struct smb_connobj smb_vclist;
76 
77 void smb_co_init(struct smb_connobj *cp, int level, char *objname);
78 void smb_co_done(struct smb_connobj *cp);
79 void smb_co_hold(struct smb_connobj *cp);
80 void smb_co_rele(struct smb_connobj *cp);
81 void smb_co_kill(struct smb_connobj *cp);
82 
83 static void smb_vc_free(struct smb_connobj *cp);
84 static void smb_vc_gone(struct smb_connobj *cp);
85 
86 static void smb_share_free(struct smb_connobj *cp);
87 static void smb_share_gone(struct smb_connobj *cp);
88 
89 int
90 smb_sm_init(void)
91 {
92 	smb_co_init(&smb_vclist, SMBL_SM, "smbsm");
93 	return (0);
94 }
95 
96 int
97 smb_sm_idle(void)
98 {
99 	int error = 0;
100 	SMB_CO_LOCK(&smb_vclist);
101 	if (smb_vclist.co_usecount > 1) {
102 		SMBSDEBUG("%d connections still active\n",
103 		    smb_vclist.co_usecount - 1);
104 		error = EBUSY;
105 	}
106 	SMB_CO_UNLOCK(&smb_vclist);
107 	return (error);
108 }
109 
110 void
111 smb_sm_done(void)
112 {
113 	/*
114 	 * XXX Q4BP why are we not iterating on smb_vclist here?
115 	 * Because the caller has just called smb_sm_idle() to
116 	 * make sure we have no VCs before calling this.
117 	 */
118 	smb_co_done(&smb_vclist);
119 }
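
/*
 * Example (minimal sketch, kept under "#if 0"): the teardown ordering
 * described above, as a hypothetical caller might follow it.  The
 * function name nsmb_unload_example is made up for illustration; the
 * real module-unload path may differ.
 */
#if 0
static int
nsmb_unload_example(void)
{
	int error;

	/* Refuse to tear down while any VC still holds a reference. */
	error = smb_sm_idle();
	if (error != 0)
		return (error);		/* EBUSY: connections still active */

	/* No VCs remain, so smb_vclist has no children. */
	smb_sm_done();
	return (0);
}
#endif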
120 
121 
122 
123 /*
124  * Common code for connection object
125  */
126 /*ARGSUSED*/
127 void
128 smb_co_init(struct smb_connobj *cp, int level, char *objname)
129 {
130 
131 	mutex_init(&cp->co_lock, objname,  MUTEX_DRIVER, NULL);
132 
133 	cp->co_level = level;
134 	cp->co_usecount = 1;
135 	SLIST_INIT(&cp->co_children);
136 }
137 
138 /*
139  * Called just before free of an object
140  * of which smb_connobj is a part, i.e.
141  * _vc_free, _share_free, also sm_done.
142  */
143 void
144 smb_co_done(struct smb_connobj *cp)
145 {
146 	ASSERT(SLIST_EMPTY(&cp->co_children));
147 	mutex_destroy(&cp->co_lock);
148 }
149 
150 static void
151 smb_co_addchild(
152 	struct smb_connobj *parent,
153 	struct smb_connobj *child)
154 {
155 
156 	/*
157 	 * Set the child's pointer to the parent.
158 	 * No references yet, so no need to lock.
159 	 */
160 	ASSERT(child->co_usecount == 1);
161 	child->co_parent = parent;
162 
163 	/*
164 	 * Add the child to the parent's list of
165 	 * children, and in-line smb_co_hold
166 	 */
167 	ASSERT(MUTEX_HELD(&parent->co_lock));
168 	parent->co_usecount++;
169 	SLIST_INSERT_HEAD(&parent->co_children, child, co_next);
170 }
171 
172 void
173 smb_co_hold(struct smb_connobj *cp)
174 {
175 	SMB_CO_LOCK(cp);
176 	cp->co_usecount++;
177 	SMB_CO_UNLOCK(cp);
178 }
179 
180 /*
181  * Called via smb_vc_rele, smb_share_rele
182  */
183 void
184 smb_co_rele(struct smb_connobj *co)
185 {
186 	struct smb_connobj *parent;
187 	int old_flags;
188 
189 	SMB_CO_LOCK(co);
190 	if (co->co_usecount > 1) {
191 		co->co_usecount--;
192 		SMB_CO_UNLOCK(co);
193 		return;
194 	}
195 	ASSERT(co->co_usecount == 1);
196 	co->co_usecount = 0;
197 
198 	/*
199 	 * This list of children should be empty now.
200 	 * Check this while we're still linked, so
201 	 * we have a better chance of debugging.
202 	 */
203 	ASSERT(SLIST_EMPTY(&co->co_children));
204 
205 	/*
206 	 * OK, this element is going away.
207 	 *
208 	 * We need to drop the lock on this CO so we can take the
209 	 * parent CO lock. The _GONE flag prevents this CO from
210 	 * getting new references before we can unlink it from the
211 	 * parent list.
212 	 *
213 	 * The _GONE flag is also used to ensure that the co_gone
214 	 * function is called only once.  Note that smb_co_kill may
215 	 * do this before we get here.  If we find that the _GONE
216 	 * flag was not already set, then call the co_gone hook
217 	 * (smb_share_gone, smb_vc_gone) which will disconnect
218 	 * the share or the VC, respectively.
219 	 *
220 	 * Note the old: smb_co_gone(co, scred);
221 	 * is now in-line here.
222 	 */
223 	old_flags = co->co_flags;
224 	co->co_flags |= SMBO_GONE;
225 	SMB_CO_UNLOCK(co);
226 
227 	if ((old_flags & SMBO_GONE) == 0 && co->co_gone)
228 		co->co_gone(co);
229 
230 	/*
231 	 * If we have a parent (only smb_vclist does not)
232 	 * then unlink from parent's list of children.
233 	 * We have the only reference to the child.
234 	 */
235 	parent = co->co_parent;
236 	if (parent) {
237 		SMB_CO_LOCK(parent);
238 		ASSERT(SLIST_FIRST(&parent->co_children));
239 		if (SLIST_FIRST(&parent->co_children)) {
240 			SLIST_REMOVE(&parent->co_children, co,
241 			    smb_connobj, co_next);
242 		}
243 		SMB_CO_UNLOCK(parent);
244 	}
245 
246 	/*
247 	 * Now it's safe to free the CO
248 	 */
249 	if (co->co_free) {
250 		co->co_free(co);
251 	}
252 
253 	/*
254 	 * Finally, if the CO had a parent, decrement
255 	 * the parent's hold count for the lost child.
256 	 */
257 	if (parent) {
258 		/*
259 		 * Recursive call here (easier for debugging).
260 		 * Can only go two levels.
261 		 */
262 		smb_co_rele(parent);
263 	}
264 }
265 
266 /*
267  * Do just the first part of what co_gone does,
268  * i.e. tree disconnect, or disconnect a VC.
269  * This is used to forcibly close things.
270  */
271 void
272 smb_co_kill(struct smb_connobj *co)
273 {
274 	int old_flags;
275 
276 	SMB_CO_LOCK(co);
277 	old_flags = co->co_flags;
278 	co->co_flags |= SMBO_GONE;
279 	SMB_CO_UNLOCK(co);
280 
281 	/*
282 	 * Do the same "call only once" logic here as in
283 	 * smb_co_rele, though it's probably not possible
284 	 * for this to be called after smb_co_rele.
285 	 */
286 	if ((old_flags & SMBO_GONE) == 0 && co->co_gone)
287 		co->co_gone(co);
288 
289 	/* XXX: Walk list of children and kill those too? */
290 }
291 
292 
293 /*
294  * Session objects, which are referred to as "VC" for
295  * "virtual circuit". This has nothing to do with the
296  * CIFS notion of a "virtual circuit".  See smb_conn.h
297  */
298 
299 void
300 smb_vc_hold(struct smb_vc *vcp)
301 {
302 	smb_co_hold(VCTOCP(vcp));
303 }
304 
305 void
306 smb_vc_rele(struct smb_vc *vcp)
307 {
308 	smb_co_rele(VCTOCP(vcp));
309 }
310 
311 void
312 smb_vc_kill(struct smb_vc *vcp)
313 {
314 	smb_co_kill(VCTOCP(vcp));
315 }
316 
317 /*
318  * Normally called via smb_vc_rele()
319  * after co_usecount drops to zero.
320  * Also called via: smb_vc_kill()
321  *
322  * Shutdown the VC to this server,
323  * invalidate shares linked with it.
324  */
325 /*ARGSUSED*/
326 static void
327 smb_vc_gone(struct smb_connobj *cp)
328 {
329 	struct smb_vc *vcp = CPTOVC(cp);
330 
331 	/*
332 	 * Was smb_vc_disconnect(vcp);
333 	 */
334 	smb_iod_disconnect(vcp);
335 }
336 
337 /*
338  * The VC has no more references.  Free it.
339  * No locks needed here.
340  */
341 static void
342 smb_vc_free(struct smb_connobj *cp)
343 {
344 	struct smb_vc *vcp = CPTOVC(cp);
345 
346 	/*
347 	 * The _gone call should have emptied the request list,
348 	 * but let's make sure, as requests may have references
349 	 * to this VC without taking a hold.  (The hold is the
350 	 * responsibility of threads placing requests.)
351 	 */
352 	ASSERT(vcp->iod_rqlist.tqh_first == NULL);
353 
354 	if (vcp->vc_tdata)
355 		SMB_TRAN_DONE(vcp);
356 
357 /*
358  * We are not using the iconv routines here, so they are disabled for now.
359  * REVISIT.
360  */
361 #ifdef NOTYETDEFINED
362 	if (vcp->vc_tolower)
363 		iconv_close(vcp->vc_tolower);
364 	if (vcp->vc_toupper)
365 		iconv_close(vcp->vc_toupper);
366 	if (vcp->vc_tolocal)
367 		iconv_close(vcp->vc_tolocal);
368 	if (vcp->vc_toserver)
369 		iconv_close(vcp->vc_toserver);
370 #endif
371 
372 	if (vcp->vc_mackey != NULL)
373 		kmem_free(vcp->vc_mackey, vcp->vc_mackeylen);
374 
375 	cv_destroy(&vcp->iod_idle);
376 	rw_destroy(&vcp->iod_rqlock);
377 	sema_destroy(&vcp->vc_sendlock);
378 	cv_destroy(&vcp->vc_statechg);
379 	smb_co_done(VCTOCP(vcp));
380 	kmem_free(vcp, sizeof (*vcp));
381 }
382 
383 /*ARGSUSED*/
384 int
385 smb_vc_create(smbioc_ossn_t *ossn, smb_cred_t *scred, smb_vc_t **vcpp)
386 {
387 	static char objtype[] = "smb_vc";
388 	cred_t *cr = scred->scr_cred;
389 	struct smb_vc *vcp;
390 	int error = 0;
391 
392 	ASSERT(MUTEX_HELD(&smb_vclist.co_lock));
393 
394 	vcp = kmem_zalloc(sizeof (struct smb_vc), KM_SLEEP);
395 
396 	smb_co_init(VCTOCP(vcp), SMBL_VC, objtype);
397 	vcp->vc_co.co_free = smb_vc_free;
398 	vcp->vc_co.co_gone = smb_vc_gone;
399 
400 	cv_init(&vcp->vc_statechg, objtype, CV_DRIVER, NULL);
401 	sema_init(&vcp->vc_sendlock, 1, objtype, SEMA_DRIVER, NULL);
402 	rw_init(&vcp->iod_rqlock, objtype, RW_DRIVER, NULL);
403 	cv_init(&vcp->iod_idle, objtype, CV_DRIVER, NULL);
404 
405 	/* Expanded TAILQ_HEAD_INITIALIZER */
406 	vcp->iod_rqlist.tqh_last = &vcp->iod_rqlist.tqh_first;
407 
408 	vcp->vc_state = SMBIOD_ST_IDLE;
409 
410 	/*
411 	 * These identify the connection.
412 	 */
413 	vcp->vc_zoneid = getzoneid();
414 	bcopy(ossn, &vcp->vc_ssn, sizeof (*ossn));
415 
416 	/* This fills in vcp->vc_tdata */
417 	vcp->vc_tdesc = &smb_tran_nbtcp_desc;
418 	if ((error = SMB_TRAN_CREATE(vcp, cr)) != 0)
419 		goto errout;
420 
421 	/* Success! */
422 	smb_co_addchild(&smb_vclist, VCTOCP(vcp));
423 	*vcpp = vcp;
424 	return (0);
425 
426 errout:
427 	/*
428 	 * This will destroy the new vc.
429 	 * See: smb_vc_free
430 	 */
431 	smb_vc_rele(vcp);
432 	return (error);
433 }
434 
435 /*
436  * Find or create a VC identified by the info in ossn
437  * and return it with a "hold", but not locked.
438  */
439 /*ARGSUSED*/
440 int
441 smb_vc_findcreate(smbioc_ossn_t *ossn, smb_cred_t *scred, smb_vc_t **vcpp)
442 {
443 	struct smb_connobj *co;
444 	struct smb_vc *vcp;
445 	smbioc_ssn_ident_t *vc_id;
446 	int error;
447 	zoneid_t zoneid = getzoneid();
448 
449 	*vcpp = vcp = NULL;
450 
451 	SMB_CO_LOCK(&smb_vclist);
452 
453 	/* var, head, next_field */
454 	SLIST_FOREACH(co, &smb_vclist.co_children, co_next) {
455 		vcp = CPTOVC(co);
456 
457 		/*
458 		 * Some things we can check without
459 		 * holding the lock (those that are
460 		 * set at creation and never change).
461 		 */
462 
463 		/* VCs in other zones are invisible. */
464 		if (vcp->vc_zoneid != zoneid)
465 			continue;
466 
467 		/* Also segregate by Unix owner. */
468 		if (vcp->vc_owner != ossn->ssn_owner)
469 			continue;
470 
471 		/*
472 		 * Compare identifying info:
473 		 * server address, user, domain
474 		 * names are case-insensitive
475 		 */
476 		vc_id = &vcp->vc_ssn.ssn_id;
477 		if (bcmp(&vc_id->id_srvaddr,
478 		    &ossn->ssn_id.id_srvaddr,
479 		    sizeof (vc_id->id_srvaddr)))
480 			continue;
481 		if (u8_strcmp(vc_id->id_user, ossn->ssn_id.id_user, 0,
482 		    U8_STRCMP_CI_LOWER, U8_UNICODE_LATEST, &error))
483 			continue;
484 		if (u8_strcmp(vc_id->id_domain, ossn->ssn_id.id_domain, 0,
485 		    U8_STRCMP_CI_LOWER, U8_UNICODE_LATEST, &error))
486 			continue;
487 
488 		/*
489 		 * We have a match, but still have to check
490 		 * the _GONE flag, and do that with a lock.
491 		 * No new references when _GONE is set.
492 		 *
493 		 * Also clear SMBVOPT_CREATE which the caller
494 		 * may check to find out if we did create.
495 		 */
496 		SMB_VC_LOCK(vcp);
497 		if ((vcp->vc_flags & SMBV_GONE) == 0) {
498 			ossn->ssn_vopt &= ~SMBVOPT_CREATE;
499 			/*
500 			 * Return it held, unlocked.
501 			 * In-line smb_vc_hold here.
502 			 */
503 			co->co_usecount++;
504 			SMB_VC_UNLOCK(vcp);
505 			*vcpp = vcp;
506 			error = 0;
507 			goto out;
508 		}
509 		SMB_VC_UNLOCK(vcp);
510 		/* keep looking. */
511 	}
512 	vcp = NULL;
513 
514 	/* Note: smb_vclist is still locked. */
515 
516 	if (ossn->ssn_vopt & SMBVOPT_CREATE) {
517 		/*
518 		 * Create a new VC.  It starts out with
519 		 * hold count = 1, so don't incr. here.
520 		 */
521 		error = smb_vc_create(ossn, scred, &vcp);
522 		if (error == 0)
523 			*vcpp = vcp;
524 	} else
525 		error = ENOENT;
526 
527 out:
528 	SMB_CO_UNLOCK(&smb_vclist);
529 	return (error);
530 }
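
/*
 * Example (minimal sketch, kept under "#if 0"): typical use of
 * smb_vc_findcreate().  The caller sets SMBVOPT_CREATE when a new VC
 * may be created; if the flag comes back cleared, an existing VC was
 * reused.  The name vc_lookup_example is hypothetical.
 */
#if 0
static int
vc_lookup_example(smbioc_ossn_t *ossn, smb_cred_t *scred)
{
	smb_vc_t *vcp = NULL;
	int error;

	ossn->ssn_vopt |= SMBVOPT_CREATE;
	error = smb_vc_findcreate(ossn, scred, &vcp);
	if (error != 0)
		return (error);

	/* vcp is returned "held" but not locked; use it, then ... */

	smb_vc_rele(vcp);
	return (0);
}
#endif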
531 
532 
533 /*
534  * Helper functions that operate on VCs
535  */
536 
537 /*
538  * Get a pointer to the IP address suitable for passing to Trusted
539  * Extensions find_tpc() routine.  Used by smbfs_mount_label_policy().
540  * Compare this code to nfs_mount_label_policy() if problems arise.
541  */
542 void *
543 smb_vc_getipaddr(struct smb_vc *vcp, int *ipvers)
544 {
545 	smbioc_ssn_ident_t *id = &vcp->vc_ssn.ssn_id;
546 	void *ret;
547 
548 	switch (id->id_srvaddr.sa.sa_family) {
549 	case AF_INET:
550 		*ipvers = IPV4_VERSION;
551 		ret = &id->id_srvaddr.sin.sin_addr;
552 		break;
553 
554 	case AF_INET6:
555 		*ipvers = IPV6_VERSION;
556 		ret = &id->id_srvaddr.sin6.sin6_addr;
557 		break;
558 	default:
559 		SMBSDEBUG("invalid address family %d\n",
560 		    id->id_srvaddr.sa.sa_family);
561 		*ipvers = 0;
562 		ret = NULL;
563 		break;
564 	}
565 	return (ret);
566 }
567 
568 void
569 smb_vc_walkshares(struct smb_vc *vcp,
570 	walk_share_func_t func)
571 {
572 	smb_connobj_t *co;
573 	smb_share_t *ssp;
574 
575 	/*
576 	 * Walk the share list calling func(ssp, arg)
577 	 */
578 	SMB_VC_LOCK(vcp);
579 	SLIST_FOREACH(co, &(VCTOCP(vcp)->co_children), co_next) {
580 		ssp = CPTOSS(co);
581 		SMB_SS_LOCK(ssp);
582 		func(ssp);
583 		SMB_SS_UNLOCK(ssp);
584 	}
585 	SMB_VC_UNLOCK(vcp);
586 }
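
/*
 * Example (minimal sketch, kept under "#if 0"): a walk_share_func_t
 * callback for smb_vc_walkshares().  The callback runs with the share
 * lock already held, so it must not block or re-take SMB_SS_LOCK.
 * mark_stale_example is a made-up name; compare smb_share_invalidate()
 * later in this file, which also fits this callback signature.
 */
#if 0
static void
mark_stale_example(struct smb_share *ssp)
{
	/* ss_lock is held by smb_vc_walkshares() */
	ssp->ss_flags &= ~SMBS_CONNECTED;
}

static void
drop_all_shares_example(struct smb_vc *vcp)
{
	smb_vc_walkshares(vcp, mark_stale_example);
}
#endif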
587 
588 
589 /*
590  * Share implementation
591  */
592 
593 void
594 smb_share_hold(struct smb_share *ssp)
595 {
596 	smb_co_hold(SSTOCP(ssp));
597 }
598 
599 void
600 smb_share_rele(struct smb_share *ssp)
601 {
602 	smb_co_rele(SSTOCP(ssp));
603 }
604 
605 void
606 smb_share_kill(struct smb_share *ssp)
607 {
608 	smb_co_kill(SSTOCP(ssp));
609 }
610 
611 /*
612  * Normally called via smb_share_rele()
613  * after co_usecount drops to zero.
614  * Also called via: smb_share_kill()
615  */
616 static void
617 smb_share_gone(struct smb_connobj *cp)
618 {
619 	struct smb_cred scred;
620 	struct smb_share *ssp = CPTOSS(cp);
621 
622 	smb_credinit(&scred, NULL);
623 	smb_iod_shutdown_share(ssp);
624 	smb_smb_treedisconnect(ssp, &scred);
625 	smb_credrele(&scred);
626 }
627 
628 /*
629  * Normally called via smb_share_rele()
630  * after co_usecount drops to zero.
631  */
632 static void
633 smb_share_free(struct smb_connobj *cp)
634 {
635 	struct smb_share *ssp = CPTOSS(cp);
636 
637 	cv_destroy(&ssp->ss_conn_done);
638 	smb_co_done(SSTOCP(ssp));
639 	kmem_free(ssp, sizeof (*ssp));
640 }
641 
642 /*
643  * Allocate share structure and attach it to the given VC
644  * Connection expected to be locked on entry. Share will be returned
645  * in locked state.
646  */
647 /*ARGSUSED*/
648 int
649 smb_share_create(smbioc_tcon_t *tcon, struct smb_vc *vcp,
650 	struct smb_share **sspp, struct smb_cred *scred)
651 {
652 	static char objtype[] = "smb_ss";
653 	struct smb_share *ssp;
654 
655 	ASSERT(MUTEX_HELD(&vcp->vc_lock));
656 
657 	ssp = kmem_zalloc(sizeof (struct smb_share), KM_SLEEP);
658 	smb_co_init(SSTOCP(ssp), SMBL_SHARE, objtype);
659 	ssp->ss_co.co_free = smb_share_free;
660 	ssp->ss_co.co_gone = smb_share_gone;
661 
662 	cv_init(&ssp->ss_conn_done, objtype, CV_DRIVER, NULL);
663 	ssp->ss_tid = SMB_TID_UNKNOWN;
664 
665 	bcopy(&tcon->tc_sh, &ssp->ss_ioc,
666 	    sizeof (smbioc_oshare_t));
667 
668 	smb_co_addchild(VCTOCP(vcp), SSTOCP(ssp));
669 	*sspp = ssp;
670 
671 	return (0);
672 }
673 
674 /*
675  * Find or create a share under the given VC
676  * and return it with a "hold", but not locked.
677  */
678 
679 int
680 smb_share_findcreate(smbioc_tcon_t *tcon, struct smb_vc *vcp,
681 	struct smb_share **sspp, struct smb_cred *scred)
682 {
683 	struct smb_connobj *co;
684 	struct smb_share *ssp = NULL;
685 	int error = 0;
686 
687 	*sspp = NULL;
688 
689 	SMB_VC_LOCK(vcp);
690 
691 	/* var, head, next_field */
692 	SLIST_FOREACH(co, &(VCTOCP(vcp)->co_children), co_next) {
693 		ssp = CPTOSS(co);
694 
695 		/* Share name */
696 		if (u8_strcmp(ssp->ss_name, tcon->tc_sh.sh_name, 0,
697 		    U8_STRCMP_CI_LOWER, U8_UNICODE_LATEST, &error))
698 			continue;
699 
700 		/*
701 		 * We have a match, but still have to check
702 		 * the _GONE flag, and do that with a lock.
703 		 * No new references when _GONE is set.
704 		 *
705 		 * Also clear SMBSOPT_CREATE which the caller
706 		 * may check to find out if we did create.
707 		 */
708 		SMB_SS_LOCK(ssp);
709 		if ((ssp->ss_flags & SMBS_GONE) == 0) {
710 			tcon->tc_opt &= ~SMBSOPT_CREATE;
711 			/*
712 			 * Return it held, unlocked.
713 			 * In-line smb_share_hold here.
714 			 */
715 			co->co_usecount++;
716 			SMB_SS_UNLOCK(ssp);
717 			*sspp = ssp;
718 			error = 0;
719 			goto out;
720 		}
721 		SMB_SS_UNLOCK(ssp);
722 		/* keep looking. */
723 	}
724 	ssp = NULL;
725 
726 	/* Note: vcp (list of shares) is still locked. */
727 
728 	if (tcon->tc_opt & SMBSOPT_CREATE) {
729 		/*
730 		 * Create a new share.  It starts out with
731 		 * hold count = 1, so don't incr. here.
732 		 */
733 		error = smb_share_create(tcon, vcp, &ssp, scred);
734 		if (error == 0)
735 			*sspp = ssp;
736 	} else
737 		error = ENOENT;
738 
739 out:
740 	SMB_VC_UNLOCK(vcp);
741 	return (error);
742 }
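
/*
 * Example (minimal sketch, kept under "#if 0"): find or create a share
 * under a held VC, then connect it with smb_share_tcon() below.  The
 * name share_setup_example is hypothetical.
 */
#if 0
static int
share_setup_example(smbioc_tcon_t *tcon, smb_vc_t *vcp, smb_cred_t *scred)
{
	smb_share_t *ssp = NULL;
	int error;

	tcon->tc_opt |= SMBSOPT_CREATE;
	error = smb_share_findcreate(tcon, vcp, &ssp, scred);
	if (error != 0)
		return (error);

	/* Returned held, not locked.  Now do the tree connect. */
	error = smb_share_tcon(ssp, scred);
	if (error != 0) {
		smb_share_rele(ssp);
		return (error);
	}

	/* ... use ssp, then smb_share_rele(ssp) when finished ... */
	return (0);
}
#endif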
743 
744 
745 /*
746  * Helper functions that operate on shares
747  */
748 
749 /*
750  * Mark this share as invalid, so consumers will know
751  * their file handles have become invalid.
752  *
753  * Most share consumers store a copy of ss_vcgenid when
754  * opening a file handle and compare that with what's in
755  * the share before using a file handle.  If the genid
756  * doesn't match, the file handle has become "stale"
757  * due to disconnect.  Therefore, zap ss_vcgenid here.
758  */
759 void
760 smb_share_invalidate(struct smb_share *ssp)
761 {
762 
763 	ASSERT(MUTEX_HELD(&ssp->ss_lock));
764 
765 	ssp->ss_flags &= ~SMBS_CONNECTED;
766 	ssp->ss_tid = SMB_TID_UNKNOWN;
767 	ssp->ss_vcgenid = 0;
768 }
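
/*
 * Example (minimal sketch, kept under "#if 0"): the consumer-side
 * check described above.  A caller is assumed to have saved a copy of
 * ss_vcgenid when it opened a file handle; a mismatch here means the
 * share was invalidated and the handle is stale.  The function name
 * and the uint32_t type of the saved copy are assumptions made for
 * illustration.
 */
#if 0
static int
fid_still_valid_example(struct smb_share *ssp, uint32_t saved_genid)
{
	int valid;

	SMB_SS_LOCK(ssp);
	valid = (ssp->ss_flags & SMBS_CONNECTED) &&
	    (ssp->ss_vcgenid == saved_genid);
	SMB_SS_UNLOCK(ssp);

	return (valid);		/* zero: the old file handle is stale */
}
#endif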
769 
770 /*
771  * Connect (or reconnect) a share object.
772  *
773  * Called by smb_usr_get_tree() for new connections,
774  * and called by smb_rq_enqueue() for reconnect.
775  */
776 int
777 smb_share_tcon(smb_share_t *ssp, smb_cred_t *scred)
778 {
779 	clock_t tmo;
780 	int error;
781 
782 	SMB_SS_LOCK(ssp);
783 
784 	if (ssp->ss_flags & SMBS_CONNECTED) {
785 		SMBIODEBUG("already connected?");
786 		error = 0;
787 		goto out;
788 	}
789 
790 	/*
791 	 * Wait for completion of any state changes
792 	 * that might be underway.
793 	 */
794 	while (ssp->ss_flags & SMBS_RECONNECTING) {
795 		ssp->ss_conn_waiters++;
796 		tmo = cv_wait_sig(&ssp->ss_conn_done, &ssp->ss_lock);
797 		ssp->ss_conn_waiters--;
798 		if (tmo == 0) {
799 			/* Interrupt! */
800 			error = EINTR;
801 			goto out;
802 		}
803 	}
804 
805 	/* Did someone else do it for us? */
806 	if (ssp->ss_flags & SMBS_CONNECTED) {
807 		error = 0;
808 		goto out;
809 	}
810 
811 	/*
812 	 * OK, we'll do the work.
813 	 */
814 	ssp->ss_flags |= SMBS_RECONNECTING;
815 
816 	/*
817 	 * Drop the lock while doing the TCON.
818 	 * On success, sets ss_tid, ss_vcgenid,
819 	 * and ss_flags |= SMBS_CONNECTED;
820 	 */
821 	SMB_SS_UNLOCK(ssp);
822 	error = smb_smb_treeconnect(ssp, scred);
823 	SMB_SS_LOCK(ssp);
824 
825 	ssp->ss_flags &= ~SMBS_RECONNECTING;
826 
827 	/* They can all go ahead! */
828 	if (ssp->ss_conn_waiters)
829 		cv_broadcast(&ssp->ss_conn_done);
830 
831 out:
832 	SMB_SS_UNLOCK(ssp);
833 
834 	return (error);
835 }
836 
837 /*
838  * Solaris zones support
839  */
840 /*ARGSUSED*/
841 void
842 lingering_vc(struct smb_vc *vc)
843 {
844 	/* good place for a breakpoint */
845 	DEBUG_ENTER("lingering VC");
846 }
847 
848 /*
849  * On zone shutdown, kill any IOD threads still running in this zone.
850  */
851 /* ARGSUSED */
852 void
853 nsmb_zone_shutdown(zoneid_t zoneid, void *data)
854 {
855 	struct smb_connobj *co;
856 	struct smb_vc *vcp;
857 
858 	SMB_CO_LOCK(&smb_vclist);
859 	SLIST_FOREACH(co, &smb_vclist.co_children, co_next) {
860 		vcp = CPTOVC(co);
861 
862 		if (vcp->vc_zoneid != zoneid)
863 			continue;
864 
865 		/*
866 		 * This will close the connection, and
867 		 * cause the IOD thread to terminate.
868 		 */
869 		smb_vc_kill(vcp);
870 	}
871 	SMB_CO_UNLOCK(&smb_vclist);
872 }
873 
874 /*
875  * On zone destroy, kill any IOD threads and free all resources they used.
876  */
877 /* ARGSUSED */
878 void
879 nsmb_zone_destroy(zoneid_t zoneid, void *data)
880 {
881 	struct smb_connobj *co;
882 	struct smb_vc *vcp;
883 
884 	/*
885 	 * We will repeat what should have already happened
886 	 * in zone_shutdown to make things go away.
887 	 *
888 	 * There should have been an smb_vc_rele call
889 	 * by now for all VCs in the zone.  If not,
890 	 * there's probably more we needed to do in
891 	 * the shutdown call.
892 	 */
893 
894 	SMB_CO_LOCK(&smb_vclist);
895 
896 	if (smb_vclist.co_usecount > 1) {
897 		SMBERROR("%d connections still active\n",
898 		    smb_vclist.co_usecount - 1);
899 	}
900 
901 	/* var, head, next_field */
902 	SLIST_FOREACH(co, &smb_vclist.co_children, co_next) {
903 		vcp = CPTOVC(co);
904 
905 		if (vcp->vc_zoneid != zoneid)
906 			continue;
907 
908 		/* Debugging */
909 		lingering_vc(vcp);
910 	}
911 
912 	SMB_CO_UNLOCK(&smb_vclist);
913 }
914