/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: smb_conn.c,v 1.27.166.1 2005/05/27 02:35:29 lindak Exp $
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Connection engine.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/vnode.h>
#include <sys/stream.h>
#include <sys/stropts.h>
#include <sys/socketvar.h>
#include <sys/cred.h>
#include <netinet/in.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <sys/cmn_err.h>
#include <sys/thread.h>
#include <sys/atomic.h>
#include <sys/u8_textprep.h>

#include <netsmb/smb_osdep.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_pass.h>

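/*
 * smb_vclist is the root of the connection object tree:
 * its children are the VCs (sessions), and each VC's
 * children are the shares using that session.
 */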
static struct smb_connobj smb_vclist;

void smb_co_init(struct smb_connobj *cp, int level, char *objname);
void smb_co_done(struct smb_connobj *cp);
void smb_co_hold(struct smb_connobj *cp);
void smb_co_rele(struct smb_connobj *cp);
void smb_co_kill(struct smb_connobj *cp);

static void smb_vc_free(struct smb_connobj *cp);
static void smb_vc_gone(struct smb_connobj *cp);

static void smb_share_free(struct smb_connobj *cp);
static void smb_share_gone(struct smb_connobj *cp);

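/*
 * Module-level setup and teardown for the connection layer.
 * smb_sm_idle() reports EBUSY while any VC still exists;
 * callers are expected to check it before smb_sm_done().
 */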
int
smb_sm_init(void)
{
	smb_co_init(&smb_vclist, SMBL_SM, "smbsm");
	return (0);
}

int
smb_sm_idle(void)
{
	int error = 0;
	SMB_CO_LOCK(&smb_vclist);
	if (smb_vclist.co_usecount > 1) {
		SMBSDEBUG("%d connections still active\n",
		    smb_vclist.co_usecount - 1);
		error = EBUSY;
	}
	SMB_CO_UNLOCK(&smb_vclist);
	return (error);
}

void
smb_sm_done(void)
{
	/*
	 * XXX Q4BP why are we not iterating on smb_vclist here?
	 * Because the caller has just called smb_sm_idle() to
	 * make sure we have no VCs before calling this.
	 */
	smb_co_done(&smb_vclist);
}



/*
 * Common code for connection object
 */
/*ARGSUSED*/
void
smb_co_init(struct smb_connobj *cp, int level, char *objname)
{

	mutex_init(&cp->co_lock, objname,  MUTEX_DRIVER, NULL);

	cp->co_level = level;
	cp->co_usecount = 1;
	SLIST_INIT(&cp->co_children);
}

/*
 * Called just before free of an object
 * of which smb_connobj is a part, i.e.
 * _vc_free, _share_free, also sm_done.
 */
void
smb_co_done(struct smb_connobj *cp)
{
	ASSERT(SLIST_EMPTY(&cp->co_children));
	mutex_destroy(&cp->co_lock);
}

static void
smb_co_addchild(
	struct smb_connobj *parent,
	struct smb_connobj *child)
{

	/*
	 * Set the child's pointer to the parent.
	 * No references yet, so no need to lock.
	 */
	ASSERT(child->co_usecount == 1);
	child->co_parent = parent;

	/*
	 * Add the child to the parent's list of
	 * children, and in-line smb_co_hold
	 */
	ASSERT(MUTEX_HELD(&parent->co_lock));
	parent->co_usecount++;
	SLIST_INSERT_HEAD(&parent->co_children, child, co_next);
}

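/*
 * Take a reference (hold) on a connection object.
 */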
void
smb_co_hold(struct smb_connobj *cp)
{
	SMB_CO_LOCK(cp);
	cp->co_usecount++;
	SMB_CO_UNLOCK(cp);
}

/*
 * Called via smb_vc_rele, smb_share_rele
 */
void
smb_co_rele(struct smb_connobj *co)
{
	struct smb_connobj *parent;
	int old_flags;

	SMB_CO_LOCK(co);
	if (co->co_usecount > 1) {
		co->co_usecount--;
		SMB_CO_UNLOCK(co);
		return;
	}
	ASSERT(co->co_usecount == 1);
	co->co_usecount = 0;

	/*
	 * This list of children should be empty now.
	 * Check this while we're still linked, so
	 * we have a better chance of debugging.
	 */
	ASSERT(SLIST_EMPTY(&co->co_children));

	/*
	 * OK, this element is going away.
	 *
	 * We need to drop the lock on this CO so we can take the
	 * parent CO lock. The _GONE flag prevents this CO from
	 * getting new references before we can unlink it from the
	 * parent list.
	 *
	 * The _GONE flag is also used to ensure that the co_gone
	 * function is called only once.  Note that smb_co_kill may
	 * do this before we get here.  If we find that the _GONE
	 * flag was not already set, then call the co_gone hook
	 * (smb_share_gone, smb_vc_gone) which will disconnect
	 * the share or the VC, respectively.
	 *
	 * Note the old: smb_co_gone(co, scred);
	 * is now in-line here.
	 */
	old_flags = co->co_flags;
	co->co_flags |= SMBO_GONE;
	SMB_CO_UNLOCK(co);

	if ((old_flags & SMBO_GONE) == 0 && co->co_gone)
		co->co_gone(co);

	/*
	 * If we have a parent (only smb_vclist does not)
	 * then unlink from parent's list of children.
	 * We have the only reference to the child.
	 */
	parent = co->co_parent;
	if (parent) {
		SMB_CO_LOCK(parent);
		ASSERT(SLIST_FIRST(&parent->co_children));
		if (SLIST_FIRST(&parent->co_children)) {
			SLIST_REMOVE(&parent->co_children, co,
			    smb_connobj, co_next);
		}
		SMB_CO_UNLOCK(parent);
	}

	/*
	 * Now it's safe to free the CO
	 */
	if (co->co_free) {
		co->co_free(co);
	}

	/*
	 * Finally, if the CO had a parent, decrement
	 * the parent's hold count for the lost child.
	 */
	if (parent) {
		/*
		 * Recursive call here (easier for debugging).
		 * Can only go two levels.
		 */
		smb_co_rele(parent);
	}
}

/*
 * Do just the first part of what co_gone does,
 * i.e. tree disconnect, or disconnect a VC.
 * This is used to forcibly close things.
 */
void
smb_co_kill(struct smb_connobj *co)
{
	int old_flags;

	SMB_CO_LOCK(co);
	old_flags = co->co_flags;
	co->co_flags |= SMBO_GONE;
	SMB_CO_UNLOCK(co);

	/*
	 * Do the same "call only once" logic here as in
	 * smb_co_rele, though it's probably not possible
	 * for this to be called after smb_co_rele.
	 */
	if ((old_flags & SMBO_GONE) == 0 && co->co_gone)
		co->co_gone(co);

	/* XXX: Walk list of children and kill those too? */
}


/*
 * Session objects, which are referred to as "VC" for
 * "virtual circuit". This has nothing to do with the
 * CIFS notion of a "virtual circuit".  See smb_conn.h
 */

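/*
 * Hold, release, and kill wrappers for VCs; these simply
 * apply the generic connection-object operations to the
 * smb_connobj embedded in the VC.
 */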
void
smb_vc_hold(struct smb_vc *vcp)
{
	smb_co_hold(VCTOCP(vcp));
}

void
smb_vc_rele(struct smb_vc *vcp)
{
	smb_co_rele(VCTOCP(vcp));
}

void
smb_vc_kill(struct smb_vc *vcp)
{
	smb_co_kill(VCTOCP(vcp));
}

/*
 * Normally called via smb_vc_rele()
 * after co_usecount drops to zero.
 * Also called via: smb_vc_kill()
 *
 * Shutdown the VC to this server,
 * invalidate shares linked with it.
 */
/*ARGSUSED*/
static void
smb_vc_gone(struct smb_connobj *cp)
{
	struct smb_vc *vcp = CPTOVC(cp);

	/*
	 * Was smb_vc_disconnect(vcp);
	 */
	smb_iod_disconnect(vcp);
}

/*
 * The VC has no more references.  Free it.
 * No locks needed here.
 */
static void
smb_vc_free(struct smb_connobj *cp)
{
	struct smb_vc *vcp = CPTOVC(cp);

	/*
	 * The _gone call should have emptied the request list,
	 * but let's make sure, as requests may have references
	 * to this VC without taking a hold.  (The hold is the
	 * responsibility of threads placing requests.)
	 */
	ASSERT(vcp->iod_rqlist.tqh_first == NULL);

	if (vcp->vc_tdata)
		SMB_TRAN_DONE(vcp);

/*
 * We are not using the iconv routines here. So commenting them for now.
 * REVISIT.
 */
#ifdef NOTYETDEFINED
	if (vcp->vc_tolower)
		iconv_close(vcp->vc_tolower);
	if (vcp->vc_toupper)
		iconv_close(vcp->vc_toupper);
	if (vcp->vc_tolocal)
		iconv_close(vcp->vc_tolocal);
	if (vcp->vc_toserver)
		iconv_close(vcp->vc_toserver);
#endif

	if (vcp->vc_mackey != NULL)
		kmem_free(vcp->vc_mackey, vcp->vc_mackeylen);

	cv_destroy(&vcp->iod_idle);
	rw_destroy(&vcp->iod_rqlock);
	sema_destroy(&vcp->vc_sendlock);
	cv_destroy(&vcp->vc_statechg);
	smb_co_done(VCTOCP(vcp));
	kmem_free(vcp, sizeof (*vcp));
}

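/*
 * Allocate and initialize a new VC for the connection info
 * in ossn.  The caller must hold the smb_vclist lock; on
 * success the new VC is linked under smb_vclist and returned
 * with its initial reference (co_usecount == 1).
 */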
/*ARGSUSED*/
int
smb_vc_create(smbioc_ossn_t *ossn, smb_cred_t *scred, smb_vc_t **vcpp)
{
	static char objtype[] = "smb_vc";
	cred_t *cr = scred->scr_cred;
	struct smb_vc *vcp;
	int error = 0;

	ASSERT(MUTEX_HELD(&smb_vclist.co_lock));

	vcp = kmem_zalloc(sizeof (struct smb_vc), KM_SLEEP);

	smb_co_init(VCTOCP(vcp), SMBL_VC, objtype);
	vcp->vc_co.co_free = smb_vc_free;
	vcp->vc_co.co_gone = smb_vc_gone;

	cv_init(&vcp->vc_statechg, objtype, CV_DRIVER, NULL);
	sema_init(&vcp->vc_sendlock, 1, objtype, SEMA_DRIVER, NULL);
	rw_init(&vcp->iod_rqlock, objtype, RW_DRIVER, NULL);
	cv_init(&vcp->iod_idle, objtype, CV_DRIVER, NULL);

	/* Expanded TAILQ_HEAD_INITIALIZER */
	vcp->iod_rqlist.tqh_last = &vcp->iod_rqlist.tqh_first;

	vcp->vc_state = SMBIOD_ST_IDLE;

	/*
	 * These identify the connection.
	 */
	vcp->vc_zoneid = getzoneid();
	bcopy(ossn, &vcp->vc_ssn, sizeof (*ossn));

	/* This fills in vcp->vc_tdata */
	vcp->vc_tdesc = &smb_tran_nbtcp_desc;
	if ((error = SMB_TRAN_CREATE(vcp, cr)) != 0)
		goto errout;

	/* Success! */
	smb_co_addchild(&smb_vclist, VCTOCP(vcp));
	*vcpp = vcp;
	return (0);

errout:
	/*
	 * This will destroy the new vc.
	 * See: smb_vc_free
	 */
	smb_vc_rele(vcp);
	return (error);
}

/*
 * Find or create a VC identified by the info in ossn
 * and return it with a "hold", but not locked.
 */
/*ARGSUSED*/
int
smb_vc_findcreate(smbioc_ossn_t *ossn, smb_cred_t *scred, smb_vc_t **vcpp)
{
	struct smb_connobj *co;
	struct smb_vc *vcp;
	smbioc_ssn_ident_t *vc_id;
	int error;
	zoneid_t zoneid = getzoneid();

	*vcpp = vcp = NULL;

	SMB_CO_LOCK(&smb_vclist);

	/* var, head, next_field */
	SLIST_FOREACH(co, &smb_vclist.co_children, co_next) {
		vcp = CPTOVC(co);

		/*
		 * Some things we can check without
		 * holding the lock (those that are
		 * set at creation and never change).
		 */

		/* VCs in other zones are invisible. */
		if (vcp->vc_zoneid != zoneid)
			continue;

		/* Also segregate by Unix owner. */
		if (vcp->vc_owner != ossn->ssn_owner)
			continue;

		/*
		 * Compare identifying info:
		 * server address, user, domain
		 * names are case-insensitive
		 */
		vc_id = &vcp->vc_ssn.ssn_id;
		if (bcmp(&vc_id->id_srvaddr,
		    &ossn->ssn_id.id_srvaddr,
		    sizeof (vc_id->id_srvaddr)))
			continue;
		if (u8_strcmp(vc_id->id_user, ossn->ssn_id.id_user, 0,
		    U8_STRCMP_CI_LOWER, U8_UNICODE_LATEST, &error))
			continue;
		if (u8_strcmp(vc_id->id_domain, ossn->ssn_id.id_domain, 0,
		    U8_STRCMP_CI_LOWER, U8_UNICODE_LATEST, &error))
			continue;

		/*
		 * We have a match, but still have to check
		 * the _GONE flag, and do that with a lock.
		 * No new references when _GONE is set.
		 *
		 * Also clear SMBVOPT_CREATE which the caller
		 * may check to find out if we did create.
		 */
		SMB_VC_LOCK(vcp);
		if ((vcp->vc_flags & SMBV_GONE) == 0) {
			ossn->ssn_vopt &= ~SMBVOPT_CREATE;
			/*
			 * Return it held, unlocked.
			 * In-line smb_vc_hold here.
			 */
			co->co_usecount++;
			SMB_VC_UNLOCK(vcp);
			*vcpp = vcp;
			error = 0;
			goto out;
		}
		SMB_VC_UNLOCK(vcp);
		/* keep looking. */
	}
	vcp = NULL;

	/* Note: smb_vclist is still locked. */

	if (ossn->ssn_vopt & SMBVOPT_CREATE) {
		/*
		 * Create a new VC.  It starts out with
		 * hold count = 1, so don't incr. here.
		 */
		error = smb_vc_create(ossn, scred, &vcp);
		if (error == 0)
			*vcpp = vcp;
	} else
		error = ENOENT;

out:
	SMB_CO_UNLOCK(&smb_vclist);
	return (error);
}


/*
 * Helper functions that operate on VCs
 */

/*
 * Get a pointer to the IP address suitable for passing to Trusted
 * Extensions find_tpc() routine.  Used by smbfs_mount_label_policy().
 * Compare this code to nfs_mount_label_policy() if problems arise.
 */
void *
smb_vc_getipaddr(struct smb_vc *vcp, int *ipvers)
{
	smbioc_ssn_ident_t *id = &vcp->vc_ssn.ssn_id;
	void *ret;

	switch (id->id_srvaddr.sa.sa_family) {
	case AF_INET:
		*ipvers = IPV4_VERSION;
		ret = &id->id_srvaddr.sin.sin_addr;
		break;

	case AF_INET6:
		*ipvers = IPV6_VERSION;
		ret = &id->id_srvaddr.sin6.sin6_addr;
		break;
	default:
		SMBSDEBUG("invalid address family %d\n",
		    id->id_srvaddr.sa.sa_family);
		*ipvers = 0;
		ret = NULL;
		break;
	}
	return (ret);
}

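/*
 * Apply "func" to every share on this VC, holding the VC
 * lock across the walk and each share's lock around the call.
 */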
void
smb_vc_walkshares(struct smb_vc *vcp,
	walk_share_func_t func)
{
	smb_connobj_t *co;
	smb_share_t *ssp;

	/*
	 * Walk the share list calling func(ssp, arg)
	 */
	SMB_VC_LOCK(vcp);
	SLIST_FOREACH(co, &(VCTOCP(vcp)->co_children), co_next) {
		ssp = CPTOSS(co);
		SMB_SS_LOCK(ssp);
		func(ssp);
		SMB_SS_UNLOCK(ssp);
	}
	SMB_VC_UNLOCK(vcp);
}


/*
 * Share implementation
 */

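/*
 * Hold, release, and kill wrappers for shares, analogous
 * to the smb_vc_* wrappers above.
 */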
void
smb_share_hold(struct smb_share *ssp)
{
	smb_co_hold(SSTOCP(ssp));
}

void
smb_share_rele(struct smb_share *ssp)
{
	smb_co_rele(SSTOCP(ssp));
}

void
smb_share_kill(struct smb_share *ssp)
{
	smb_co_kill(SSTOCP(ssp));
}

/*
 * Normally called via smb_share_rele()
 * after co_usecount drops to zero.
 * Also called via: smb_share_kill()
 */
static void
smb_share_gone(struct smb_connobj *cp)
{
	struct smb_cred scred;
	struct smb_share *ssp = CPTOSS(cp);

	smb_credinit(&scred, NULL);
	smb_iod_shutdown_share(ssp);
	(void) smb_smb_treedisconnect(ssp, &scred);
	smb_credrele(&scred);
}

/*
 * Normally called via smb_share_rele()
 * after co_usecount drops to zero.
 */
static void
smb_share_free(struct smb_connobj *cp)
{
	struct smb_share *ssp = CPTOSS(cp);

	cv_destroy(&ssp->ss_conn_done);
	smb_co_done(SSTOCP(ssp));
	kmem_free(ssp, sizeof (*ssp));
}

/*
 * Allocate share structure and attach it to the given VC
 * Connection expected to be locked on entry. Share will be returned
 * in locked state.
 */
/*ARGSUSED*/
int
smb_share_create(smbioc_tcon_t *tcon, struct smb_vc *vcp,
	struct smb_share **sspp, struct smb_cred *scred)
{
	static char objtype[] = "smb_ss";
	struct smb_share *ssp;

	ASSERT(MUTEX_HELD(&vcp->vc_lock));

	ssp = kmem_zalloc(sizeof (struct smb_share), KM_SLEEP);
	smb_co_init(SSTOCP(ssp), SMBL_SHARE, objtype);
	ssp->ss_co.co_free = smb_share_free;
	ssp->ss_co.co_gone = smb_share_gone;

	cv_init(&ssp->ss_conn_done, objtype, CV_DRIVER, NULL);
	ssp->ss_tid = SMB_TID_UNKNOWN;

	bcopy(&tcon->tc_sh, &ssp->ss_ioc,
	    sizeof (smbioc_oshare_t));

	smb_co_addchild(VCTOCP(vcp), SSTOCP(ssp));
	*sspp = ssp;

	return (0);
}

/*
 * Find or create a share under the given VC
 * and return it with a "hold", but not locked.
 */

int
smb_share_findcreate(smbioc_tcon_t *tcon, struct smb_vc *vcp,
	struct smb_share **sspp, struct smb_cred *scred)
{
	struct smb_connobj *co;
	struct smb_share *ssp = NULL;
	int error = 0;

	*sspp = NULL;

	SMB_VC_LOCK(vcp);

	/* var, head, next_field */
	SLIST_FOREACH(co, &(VCTOCP(vcp)->co_children), co_next) {
		ssp = CPTOSS(co);

		/* Share name */
		if (u8_strcmp(ssp->ss_name, tcon->tc_sh.sh_name, 0,
		    U8_STRCMP_CI_LOWER, U8_UNICODE_LATEST, &error))
			continue;

		/*
		 * We have a match, but still have to check
		 * the _GONE flag, and do that with a lock.
		 * No new references when _GONE is set.
		 *
		 * Also clear SMBSOPT_CREATE which the caller
		 * may check to find out if we did create.
		 */
		SMB_SS_LOCK(ssp);
		if ((ssp->ss_flags & SMBS_GONE) == 0) {
			tcon->tc_opt &= ~SMBSOPT_CREATE;
			/*
			 * Return it held, unlocked.
			 * In-line smb_share_hold here.
			 */
			co->co_usecount++;
			SMB_SS_UNLOCK(ssp);
			*sspp = ssp;
			error = 0;
			goto out;
		}
		SMB_SS_UNLOCK(ssp);
		/* keep looking. */
	}
	ssp = NULL;

	/* Note: vcp (list of shares) is still locked. */

	if (tcon->tc_opt & SMBSOPT_CREATE) {
		/*
		 * Create a new share.  It starts out with
		 * hold count = 1, so don't incr. here.
		 */
		error = smb_share_create(tcon, vcp, &ssp, scred);
		if (error == 0)
			*sspp = ssp;
	} else
		error = ENOENT;

out:
	SMB_VC_UNLOCK(vcp);
	return (error);
}


/*
 * Helper functions that operate on shares
 */

/*
 * Mark this share as invalid, so consumers will know
 * their file handles have become invalid.
 *
 * Most share consumers store a copy of ss_vcgenid when
 * opening a file handle and compare that with what's in
 * the share before using a file handle.  If the genid
 * doesn't match, the file handle has become "stale"
 * due to disconnect.  Therefore, zap ss_vcgenid here.
 */
void
smb_share_invalidate(struct smb_share *ssp)
{

	ASSERT(MUTEX_HELD(&ssp->ss_lock));

	ssp->ss_flags &= ~SMBS_CONNECTED;
	ssp->ss_tid = SMB_TID_UNKNOWN;
	ssp->ss_vcgenid = 0;
}

/*
 * Connect (or reconnect) a share object.
 *
 * Called by smb_usr_get_tree() for new connections,
 * and called by smb_rq_enqueue() for reconnect.
 */
int
smb_share_tcon(smb_share_t *ssp, smb_cred_t *scred)
{
	clock_t tmo;
	int error;

	SMB_SS_LOCK(ssp);

	if (ssp->ss_flags & SMBS_CONNECTED) {
		SMBIODEBUG("already connected?");
		error = 0;
		goto out;
	}

	/*
	 * Wait for completion of any state changes
	 * that might be underway.
	 */
	while (ssp->ss_flags & SMBS_RECONNECTING) {
		ssp->ss_conn_waiters++;
		tmo = cv_wait_sig(&ssp->ss_conn_done, &ssp->ss_lock);
		ssp->ss_conn_waiters--;
		if (tmo == 0) {
			/* Interrupt! */
			error = EINTR;
			goto out;
		}
	}

	/* Did someone else do it for us? */
	if (ssp->ss_flags & SMBS_CONNECTED) {
		error = 0;
		goto out;
	}

	/*
	 * OK, we'll do the work.
	 */
	ssp->ss_flags |= SMBS_RECONNECTING;

	/*
	 * Drop the lock while doing the TCON.
	 * On success, sets ss_tid, ss_vcgenid,
	 * and ss_flags |= SMBS_CONNECTED;
	 */
	SMB_SS_UNLOCK(ssp);
	error = smb_smb_treeconnect(ssp, scred);
	SMB_SS_LOCK(ssp);

	ssp->ss_flags &= ~SMBS_RECONNECTING;

	/* They can all go ahead! */
	if (ssp->ss_conn_waiters)
		cv_broadcast(&ssp->ss_conn_done);

out:
	SMB_SS_UNLOCK(ssp);

	return (error);
}

/*
 * Solaris zones support
 */
/*ARGSUSED*/
void
lingering_vc(struct smb_vc *vc)
{
	/* good place for a breakpoint */
	DEBUG_ENTER("lingering VC");
}

/*
 * On zone shutdown, kill any IOD threads still running in this zone.
 */
/* ARGSUSED */
void
nsmb_zone_shutdown(zoneid_t zoneid, void *data)
{
	struct smb_connobj *co;
	struct smb_vc *vcp;

	SMB_CO_LOCK(&smb_vclist);
	SLIST_FOREACH(co, &smb_vclist.co_children, co_next) {
		vcp = CPTOVC(co);

		if (vcp->vc_zoneid != zoneid)
			continue;

		/*
		 * This will close the connection, and
		 * cause the IOD thread to terminate.
		 */
		smb_vc_kill(vcp);
	}
	SMB_CO_UNLOCK(&smb_vclist);
}

/*
 * On zone destroy, kill any IOD threads and free all resources they used.
 */
/* ARGSUSED */
void
nsmb_zone_destroy(zoneid_t zoneid, void *data)
{
	struct smb_connobj *co;
	struct smb_vc *vcp;

	/*
	 * We will repeat what should have already happened
	 * in zone_shutdown to make things go away.
	 *
	 * There should have been an smb_vc_rele call
	 * by now for all VCs in the zone.  If not,
	 * there's probably more we needed to do in
	 * the shutdown call.
	 */

	SMB_CO_LOCK(&smb_vclist);

	if (smb_vclist.co_usecount > 1) {
		SMBERROR("%d connections still active\n",
		    smb_vclist.co_usecount - 1);
	}

	/* var, head, next_field */
	SLIST_FOREACH(co, &smb_vclist.co_children, co_next) {
		vcp = CPTOVC(co);

		if (vcp->vc_zoneid != zoneid)
			continue;

		/* Debugging */
		lingering_vc(vcp);
	}

	SMB_CO_UNLOCK(&smb_vclist);
}
908