xref: /illumos-gate/usr/src/uts/common/fs/smbsrv/smb_kshare.c (revision 14b24e2b79293068c8e016a69ef1d872fb5e2fd5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
25  * Copyright 2017 Joyent, Inc.
26  */
27 
28 #include <smbsrv/smb_door.h>
29 #include <smbsrv/smb_kproto.h>
30 #include <smbsrv/smb_ktypes.h>
31 
32 typedef struct smb_unshare {
33 	list_node_t	us_lnd;
34 	char		us_sharename[MAXNAMELEN];
35 } smb_unshare_t;
36 
37 static kmem_cache_t	*smb_kshare_cache_share;
38 static kmem_cache_t	*smb_kshare_cache_unexport;
39 kmem_cache_t	*smb_kshare_cache_vfs;
40 
41 static int smb_kshare_cmp(const void *, const void *);
42 static void smb_kshare_hold(const void *);
43 static boolean_t smb_kshare_rele(const void *);
44 static void smb_kshare_destroy(void *);
45 static char *smb_kshare_oemname(const char *);
46 static int smb_kshare_is_special(const char *);
47 static boolean_t smb_kshare_is_admin(const char *);
48 static smb_kshare_t *smb_kshare_decode(nvlist_t *);
49 static uint32_t smb_kshare_decode_bool(nvlist_t *, const char *, uint32_t);
50 static void smb_kshare_unexport_thread(smb_thread_t *, void *);
51 static int smb_kshare_export(smb_server_t *, smb_kshare_t *);
52 static int smb_kshare_unexport(smb_server_t *, const char *);
53 static int smb_kshare_export_trans(smb_server_t *, char *, char *, char *);
54 static void smb_kshare_csc_flags(smb_kshare_t *, const char *);
55 
56 static boolean_t smb_export_isready(smb_server_t *);
57 
58 #ifdef	_KERNEL
59 static int smb_kshare_chk_dsrv_status(int, smb_dr_ctx_t *);
60 #endif	/* _KERNEL */
61 
62 static const smb_avl_nops_t smb_kshare_avlops = {
63 	smb_kshare_cmp,
64 	smb_kshare_hold,
65 	smb_kshare_rele,
66 	smb_kshare_destroy
67 };
68 
69 #ifdef	_KERNEL
70 /*
71  * This function is not multithread-safe. The caller must ensure that only
72  * one thread calls it at a time.
73  */
74 door_handle_t
75 smb_kshare_door_init(int door_id)
76 {
77 	return (door_ki_lookup(door_id));
78 }
79 
80 /*
81  * This function is not multithread-safe. The caller must ensure that only
82  * one thread calls it at a time.
83  */
84 void
85 smb_kshare_door_fini(door_handle_t dhdl)
86 {
87 	if (dhdl)
88 		door_ki_rele(dhdl);
89 }
90 
91 /*
92  * This is a special interface used by ZFS to add or remove a share.
93  *
94  * arg is either an smb_share_t (for add operations) or a share name
95  * string (for delete operations), passed in from userspace; it must
96  * be copied into the kernel before use.
97  *
98  */
99 int
100 smb_kshare_upcall(door_handle_t dhdl, void *arg, boolean_t add_share)
101 {
102 	door_arg_t	doorarg = { 0 };
103 	char		*buf = NULL;
104 	char		*str = NULL;
105 	int		error;
106 	int		rc;
107 	unsigned int	used;
108 	smb_dr_ctx_t	*dec_ctx;
109 	smb_dr_ctx_t	*enc_ctx;
110 	smb_share_t	*lmshare = NULL;
111 	int		opcode;
112 
113 	opcode = (add_share) ? SMB_SHROP_ADD : SMB_SHROP_DELETE;
114 
115 	buf = kmem_alloc(SMB_SHARE_DSIZE, KM_SLEEP);
116 	enc_ctx = smb_dr_encode_start(buf, SMB_SHARE_DSIZE);
117 	smb_dr_put_uint32(enc_ctx, opcode);
118 
119 	switch (opcode) {
120 	case SMB_SHROP_ADD:
121 		lmshare = kmem_alloc(sizeof (smb_share_t), KM_SLEEP);
122 		error = xcopyin(arg, lmshare, sizeof (smb_share_t));
123 		if (error != 0) {
124 			kmem_free(lmshare, sizeof (smb_share_t));
125 			kmem_free(buf, SMB_SHARE_DSIZE);
126 			return (error);
127 		}
128 		smb_dr_put_share(enc_ctx, lmshare);
129 		break;
130 
131 	case SMB_SHROP_DELETE:
132 		str = kmem_alloc(MAXPATHLEN, KM_SLEEP);
133 		error = copyinstr(arg, str, MAXPATHLEN, NULL);
134 		if (error != 0) {
135 			kmem_free(str, MAXPATHLEN);
136 			kmem_free(buf, SMB_SHARE_DSIZE);
137 			return (error);
138 		}
139 		smb_dr_put_string(enc_ctx, str);
140 		kmem_free(str, MAXPATHLEN);
141 		break;
142 	}
143 
144 	if ((error = smb_dr_encode_finish(enc_ctx, &used)) != 0) {
145 		kmem_free(buf, SMB_SHARE_DSIZE);
146 		if (lmshare)
147 			kmem_free(lmshare, sizeof (smb_share_t));
148 		return (NERR_InternalError);
149 	}
150 
151 	doorarg.data_ptr = buf;
152 	doorarg.data_size = used;
153 	doorarg.rbuf = buf;
154 	doorarg.rsize = SMB_SHARE_DSIZE;
155 
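	/*
	 * Make the door upcall to smbd.  The reply is decoded
	 * from doorarg below.
	 */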
156 	error = door_ki_upcall_limited(dhdl, &doorarg, NULL, SIZE_MAX, 0);
157 
158 	if (error) {
159 		kmem_free(buf, SMB_SHARE_DSIZE);
160 		if (lmshare)
161 			kmem_free(lmshare, sizeof (smb_share_t));
162 		return (error);
163 	}
164 
165 	dec_ctx = smb_dr_decode_start(doorarg.data_ptr, doorarg.data_size);
166 	if (smb_kshare_chk_dsrv_status(opcode, dec_ctx) != 0) {
167 		kmem_free(buf, SMB_SHARE_DSIZE);
168 		if (lmshare)
169 			kmem_free(lmshare, sizeof (smb_share_t));
170 		return (NERR_InternalError);
171 	}
172 
173 	rc = smb_dr_get_uint32(dec_ctx);
174 	if (opcode == SMB_SHROP_ADD)
175 		smb_dr_get_share(dec_ctx, lmshare);
176 
177 	if (smb_dr_decode_finish(dec_ctx))
178 		rc = NERR_InternalError;
179 
180 	kmem_free(buf, SMB_SHARE_DSIZE);
181 	if (lmshare)
182 		kmem_free(lmshare, sizeof (smb_share_t));
183 
184 	return ((rc == NERR_DuplicateShare && add_share) ? 0 : rc);
185 }
186 #endif	/* _KERNEL */
187 
188 /*
189  * Executes the map and unmap commands for shares.
190  */
191 int
192 smb_kshare_exec(smb_server_t *sv, smb_shr_execinfo_t *execinfo)
193 {
194 	int exec_rc = 0;
195 
196 	(void) smb_kdoor_upcall(sv, SMB_DR_SHR_EXEC,
197 	    execinfo, smb_shr_execinfo_xdr, &exec_rc, xdr_int);
198 
199 	return (exec_rc);
200 }
201 
202 /*
203  * Obtains any host access restriction on the specified
204  * share for the given host (ipaddr) by calling smbd.
205  */
206 uint32_t
207 smb_kshare_hostaccess(smb_kshare_t *shr, smb_session_t *session)
208 {
209 	smb_shr_hostaccess_query_t req;
210 	smb_inaddr_t *ipaddr = &session->ipaddr;
211 	uint32_t host_access = SMB_SHRF_ACC_OPEN;
212 	uint32_t flag = SMB_SHRF_ACC_OPEN;
213 	uint32_t access;
214 
215 	if (smb_inet_iszero(ipaddr))
216 		return (ACE_ALL_PERMS);
217 
218 	if ((shr->shr_access_none == NULL || *shr->shr_access_none == '\0') &&
219 	    (shr->shr_access_ro == NULL || *shr->shr_access_ro == '\0') &&
220 	    (shr->shr_access_rw == NULL || *shr->shr_access_rw == '\0'))
221 		return (ACE_ALL_PERMS);
222 
223 	if (shr->shr_access_none != NULL)
224 		flag |= SMB_SHRF_ACC_NONE;
225 	if (shr->shr_access_ro != NULL)
226 		flag |= SMB_SHRF_ACC_RO;
227 	if (shr->shr_access_rw != NULL)
228 		flag |= SMB_SHRF_ACC_RW;
229 
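	/*
	 * Ask smbd which of the configured host-access lists,
	 * if any, matches the client's IP address.
	 */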
230 	req.shq_none = shr->shr_access_none;
231 	req.shq_ro = shr->shr_access_ro;
232 	req.shq_rw = shr->shr_access_rw;
233 	req.shq_flag = flag;
234 	req.shq_ipaddr = *ipaddr;
235 
236 	(void) smb_kdoor_upcall(session->s_server, SMB_DR_SHR_HOSTACCESS,
237 	    &req, smb_shr_hostaccess_query_xdr, &host_access, xdr_uint32_t);
238 
239 	switch (host_access) {
240 	case SMB_SHRF_ACC_RO:
241 		access = ACE_ALL_PERMS & ~ACE_ALL_WRITE_PERMS;
242 		break;
243 	case SMB_SHRF_ACC_OPEN:
244 	case SMB_SHRF_ACC_RW:
245 		access = ACE_ALL_PERMS;
246 		break;
247 	case SMB_SHRF_ACC_NONE:
248 	default:
249 		access = 0;
250 	}
251 
252 	return (access);
253 }
254 
255 /*
256  * This function is called when the smb_server_t is
257  * created, which means smb/service is ready for
258  * exporting SMB shares.
259  */
260 void
261 smb_export_start(smb_server_t *sv)
262 {
263 	mutex_enter(&sv->sv_export.e_mutex);
264 	if (sv->sv_export.e_ready) {
265 		mutex_exit(&sv->sv_export.e_mutex);
266 		return;
267 	}
268 
269 	sv->sv_export.e_ready = B_TRUE;
270 	mutex_exit(&sv->sv_export.e_mutex);
271 
272 	smb_avl_create(&sv->sv_export.e_share_avl, sizeof (smb_kshare_t),
273 	    offsetof(smb_kshare_t, shr_link), &smb_kshare_avlops);
274 
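	/* Create the built-in transient shares. */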
275 	(void) smb_kshare_export_trans(sv, "IPC$", "IPC$", "Remote IPC");
276 	(void) smb_kshare_export_trans(sv, "c$", SMB_CVOL, "Default Share");
277 	(void) smb_kshare_export_trans(sv, "vss$", SMB_VSS, "VSS");
278 }
279 
280 /*
281  * This function is called when the smb_server_t goes
282  * away, which means SMB shares should not be made
283  * available to clients.
284  */
285 void
286 smb_export_stop(smb_server_t *sv)
287 {
288 	mutex_enter(&sv->sv_export.e_mutex);
289 	if (!sv->sv_export.e_ready) {
290 		mutex_exit(&sv->sv_export.e_mutex);
291 		return;
292 	}
293 	sv->sv_export.e_ready = B_FALSE;
294 	mutex_exit(&sv->sv_export.e_mutex);
295 
296 	smb_avl_destroy(&sv->sv_export.e_share_avl);
297 	smb_vfs_rele_all(&sv->sv_export);
298 }
299 
300 void
301 smb_kshare_g_init(void)
302 {
303 	smb_kshare_cache_share = kmem_cache_create("smb_share_cache",
304 	    sizeof (smb_kshare_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
305 
306 	smb_kshare_cache_unexport = kmem_cache_create("smb_unexport_cache",
307 	    sizeof (smb_unshare_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
308 
309 	smb_kshare_cache_vfs = kmem_cache_create("smb_vfs_cache",
310 	    sizeof (smb_vfs_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
311 }
312 
313 void
314 smb_kshare_init(smb_server_t *sv)
315 {
316 
317 	smb_llist_constructor(&sv->sv_export.e_vfs_list, sizeof (smb_vfs_t),
318 	    offsetof(smb_vfs_t, sv_lnd));
319 
320 	smb_slist_constructor(&sv->sv_export.e_unexport_list,
321 	    sizeof (smb_unshare_t), offsetof(smb_unshare_t, us_lnd));
322 }
323 
324 int
325 smb_kshare_start(smb_server_t *sv)
326 {
327 	smb_thread_init(&sv->sv_export.e_unexport_thread, "smb_kshare_unexport",
328 	    smb_kshare_unexport_thread, sv, smbsrv_base_pri);
329 
330 	return (smb_thread_start(&sv->sv_export.e_unexport_thread));
331 }
332 
333 void
334 smb_kshare_stop(smb_server_t *sv)
335 {
336 	smb_thread_stop(&sv->sv_export.e_unexport_thread);
337 	smb_thread_destroy(&sv->sv_export.e_unexport_thread);
338 }
339 
340 void
341 smb_kshare_fini(smb_server_t *sv)
342 {
343 	smb_unshare_t *ux;
344 
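	/* Discard any unexport requests that were never processed. */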
345 	while ((ux = list_head(&sv->sv_export.e_unexport_list.sl_list))
346 	    != NULL) {
347 		smb_slist_remove(&sv->sv_export.e_unexport_list, ux);
348 		kmem_cache_free(smb_kshare_cache_unexport, ux);
349 	}
350 	smb_slist_destructor(&sv->sv_export.e_unexport_list);
351 
352 	smb_vfs_rele_all(&sv->sv_export);
353 
354 	smb_llist_destructor(&sv->sv_export.e_vfs_list);
355 }
356 
357 void
358 smb_kshare_g_fini(void)
359 {
360 	kmem_cache_destroy(smb_kshare_cache_unexport);
361 	kmem_cache_destroy(smb_kshare_cache_share);
362 	kmem_cache_destroy(smb_kshare_cache_vfs);
363 }
364 
365 /*
366  * A list of shares in nvlist format can be sent down
367  * from userspace through the IOCTL interface. The nvlist
368  * is unpacked here and all the shares in the list are
369  * exported.
370  */
371 int
372 smb_kshare_export_list(smb_ioc_share_t *ioc)
373 {
374 	smb_server_t	*sv = NULL;
375 	nvlist_t	*shrlist = NULL;
376 	nvlist_t	 *share;
377 	nvpair_t	 *nvp;
378 	smb_kshare_t	 *shr;
379 	char		*shrname;
380 	int		rc;
381 
382 	if ((rc = smb_server_lookup(&sv)) != 0)
383 		return (rc);
384 
385 	if (!smb_export_isready(sv)) {
386 		rc = ENOTACTIVE;
387 		goto out;
388 	}
389 
390 	/*
391 	 * Reality check that the nvlist's reported length doesn't exceed the
392 	 * ioctl's total length.  We then assume the nvlist_unpack() will
393 	 * sanity check the nvlist itself.
394 	 */
395 	if ((ioc->shrlen + offsetof(smb_ioc_share_t, shr)) > ioc->hdr.len) {
396 		rc = EINVAL;
397 		goto out;
398 	}
399 	rc = nvlist_unpack(ioc->shr, ioc->shrlen, &shrlist, KM_SLEEP);
400 	if (rc != 0)
401 		goto out;
402 
403 	for (nvp = nvlist_next_nvpair(shrlist, NULL); nvp != NULL;
404 	    nvp = nvlist_next_nvpair(shrlist, nvp)) {
405 
406 		/*
407 		 * Since this loop can run for a while we want to exit
408 		 * as soon as the server state is anything but RUNNING
409 		 * to allow shutdown to proceed.
410 		 */
411 		if (sv->sv_state != SMB_SERVER_STATE_RUNNING)
412 			goto out;
413 
414 		if (nvpair_type(nvp) != DATA_TYPE_NVLIST)
415 			continue;
416 
417 		shrname = nvpair_name(nvp);
418 		ASSERT(shrname);
419 
420 		if ((rc = nvpair_value_nvlist(nvp, &share)) != 0) {
421 			cmn_err(CE_WARN, "export[%s]: failed accessing",
422 			    shrname);
423 			continue;
424 		}
425 
426 		if ((shr = smb_kshare_decode(share)) == NULL) {
427 			cmn_err(CE_WARN, "export[%s]: failed decoding",
428 			    shrname);
429 			continue;
430 		}
431 
432 		/* smb_kshare_export consumes shr so it's not leaked */
433 		if ((rc = smb_kshare_export(sv, shr)) != 0) {
434 			smb_kshare_destroy(shr);
435 			continue;
436 		}
437 	}
438 	rc = 0;
439 
440 out:
441 	nvlist_free(shrlist);
442 	smb_server_release(sv);
443 	return (rc);
444 }
445 
446 /*
447  * This function is invoked when a share is disabled to disconnect trees
448  * and close files.  Cleaning up may involve VOP and/or VFS calls, which
449  * may conflict/deadlock with stuck threads if something is amiss with the
450  * file system.  Queueing the request for asynchronous processing allows the
451  * call to return immediately so that, if the unshare is being done in the
452  * context of a forced unmount, the forced unmount will always be able to
453  * proceed (unblocking stuck I/O and eventually allowing all blocked unshare
454  * processes to complete).
455  *
456  * The path lookup to find the root vnode of the VFS in question and the
457  * release of this vnode are done synchronously prior to any associated
458  * unmount.  Doing these asynchronously to an associated unmount could run
459  * the risk of a spurious EBUSY for a standard unmount or an EIO during
460  * the path lookup due to a forced unmount finishing first.
461  */
462 int
463 smb_kshare_unexport_list(smb_ioc_share_t *ioc)
464 {
465 	smb_server_t	*sv = NULL;
466 	smb_unshare_t	*ux;
467 	nvlist_t	*shrlist = NULL;
468 	nvpair_t	*nvp;
469 	boolean_t	unexport = B_FALSE;
470 	char		*shrname;
471 	int		rc;
472 
473 	if ((rc = smb_server_lookup(&sv)) != 0)
474 		return (rc);
475 
476 	/*
477 	 * Reality check that the nvlist's reported length doesn't exceed the
478 	 * ioctl's total length.  We then assume the nvlist_unpack() will
479 	 * sanity check the nvlist itself.
480 	 */
481 	if ((ioc->shrlen + offsetof(smb_ioc_share_t, shr)) > ioc->hdr.len) {
482 		rc = EINVAL;
483 		goto out;
484 	}
485 	if ((rc = nvlist_unpack(ioc->shr, ioc->shrlen, &shrlist, 0)) != 0)
486 		goto out;
487 
488 	for (nvp = nvlist_next_nvpair(shrlist, NULL); nvp != NULL;
489 	    nvp = nvlist_next_nvpair(shrlist, nvp)) {
490 		if (nvpair_type(nvp) != DATA_TYPE_NVLIST)
491 			continue;
492 
493 		shrname = nvpair_name(nvp);
494 		ASSERT(shrname);
495 
496 		if ((rc = smb_kshare_unexport(sv, shrname)) != 0)
497 			continue;
498 
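		/*
		 * Queue the share so the unexport thread can disconnect
		 * trees and close files asynchronously.
		 */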
499 		ux = kmem_cache_alloc(smb_kshare_cache_unexport, KM_SLEEP);
500 		(void) strlcpy(ux->us_sharename, shrname, MAXNAMELEN);
501 
502 		smb_slist_insert_tail(&sv->sv_export.e_unexport_list, ux);
503 		unexport = B_TRUE;
504 	}
505 
506 	if (unexport)
507 		smb_thread_signal(&sv->sv_export.e_unexport_thread);
508 	rc = 0;
509 
510 out:
511 	nvlist_free(shrlist);
512 	smb_server_release(sv);
513 	return (rc);
514 }
515 
516 /*
517  * Get properties (currently only shortname enablement)
518  * of the specified share.
519  */
520 int
521 smb_kshare_info(smb_ioc_shareinfo_t *ioc)
522 {
523 	ioc->shortnames = smb_shortnames;
524 	return (0);
525 }
526 
527 /*
528  * This function builds a response for a NetShareEnum RAP request.
529  * The list of shares is scanned twice. In the first pass, the total number
530  * of shares whose OEM name is shorter than 13 chars (esi->es_ntotal) and
531  * the number of shares that fit in the given buffer are calculated.
532  * In the second pass, the share data is encoded into the buffer.
533  *
534  * The data associated with each share has two parts: a fixed-size part and
535  * a variable-size part, which is the share's comment. The response buffer
536  * is laid out so that the fixed parts of all the shares appear first,
537  * followed by the comments for all those shares; that is why the data
538  * cannot be encoded in one pass without unnecessarily complicating the code.
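 *
 * Response buffer layout (infolen bytes of fixed entries followed by
 * cmntlen bytes of comment strings):
 *
 *	+-------------------------------+------------------------------+
 *	| fixed entries (SHARE_INFO_1)  | comment strings              |
 *	+-------------------------------+------------------------------+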
539  */
540 void
541 smb_kshare_enum(smb_server_t *sv, smb_enumshare_info_t *esi)
542 {
543 	smb_avl_t *share_avl;
544 	smb_avl_cursor_t cursor;
545 	smb_kshare_t *shr;
546 	int remained;
547 	uint16_t infolen = 0;
548 	uint16_t cmntlen = 0;
549 	uint16_t sharelen;
550 	uint16_t clen;
551 	uint32_t cmnt_offs;
552 	smb_msgbuf_t info_mb;
553 	smb_msgbuf_t cmnt_mb;
554 	boolean_t autohome_added = B_FALSE;
555 
556 	if (!smb_export_isready(sv)) {
557 		esi->es_ntotal = esi->es_nsent = 0;
558 		esi->es_datasize = 0;
559 		return;
560 	}
561 
562 	esi->es_ntotal = esi->es_nsent = 0;
563 	remained = esi->es_bufsize;
564 	share_avl = &sv->sv_export.e_share_avl;
565 
566 	/* Do the necessary calculations in the first round */
567 	smb_avl_iterinit(share_avl, &cursor);
568 
569 	while ((shr = smb_avl_iterate(share_avl, &cursor)) != NULL) {
570 		if (shr->shr_oemname == NULL) {
571 			smb_avl_release(share_avl, shr);
572 			continue;
573 		}
574 
575 		if ((shr->shr_flags & SMB_SHRF_AUTOHOME) && !autohome_added) {
576 			if (esi->es_posix_uid == shr->shr_uid) {
577 				autohome_added = B_TRUE;
578 			} else {
579 				smb_avl_release(share_avl, shr);
580 				continue;
581 			}
582 		}
583 
584 		esi->es_ntotal++;
585 
586 		if (remained <= 0) {
587 			smb_avl_release(share_avl, shr);
588 			continue;
589 		}
590 
591 		clen = strlen(shr->shr_cmnt) + 1;
592 		sharelen = SHARE_INFO_1_SIZE + clen;
593 
594 		if (sharelen <= remained) {
595 			infolen += SHARE_INFO_1_SIZE;
596 			cmntlen += clen;
597 		}
598 
599 		remained -= sharelen;
600 		smb_avl_release(share_avl, shr);
601 	}
602 
603 	esi->es_datasize = infolen + cmntlen;
604 
605 	smb_msgbuf_init(&info_mb, (uint8_t *)esi->es_buf, infolen, 0);
606 	smb_msgbuf_init(&cmnt_mb, (uint8_t *)esi->es_buf + infolen, cmntlen, 0);
607 	cmnt_offs = infolen;
608 
609 	/* Encode the data in the second round */
610 	smb_avl_iterinit(share_avl, &cursor);
611 	autohome_added = B_FALSE;
612 
613 	while ((shr = smb_avl_iterate(share_avl, &cursor)) != NULL) {
614 		if (shr->shr_oemname == NULL) {
615 			smb_avl_release(share_avl, shr);
616 			continue;
617 		}
618 
619 		if ((shr->shr_flags & SMB_SHRF_AUTOHOME) && !autohome_added) {
620 			if (esi->es_posix_uid == shr->shr_uid) {
621 				autohome_added = B_TRUE;
622 			} else {
623 				smb_avl_release(share_avl, shr);
624 				continue;
625 			}
626 		}
627 
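		/*
		 * Fixed part of the entry: the 13-byte OEM name, a pad
		 * byte, the share type, and the offset of this share's
		 * comment within the response buffer.
		 */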
628 		if (smb_msgbuf_encode(&info_mb, "13c.wl",
629 		    shr->shr_oemname, shr->shr_type, cmnt_offs) < 0) {
630 			smb_avl_release(share_avl, shr);
631 			break;
632 		}
633 
634 		if (smb_msgbuf_encode(&cmnt_mb, "s", shr->shr_cmnt) < 0) {
635 			smb_avl_release(share_avl, shr);
636 			break;
637 		}
638 
639 		cmnt_offs += strlen(shr->shr_cmnt) + 1;
640 		esi->es_nsent++;
641 
642 		smb_avl_release(share_avl, shr);
643 	}
644 
645 	smb_msgbuf_term(&info_mb);
646 	smb_msgbuf_term(&cmnt_mb);
647 }
648 
649 /*
650  * Looks up the given share and returns a pointer
651  * to its definition if it is found. In that case a
652  * hold is taken on the object before the pointer is
653  * returned, and the caller MUST always call
654  * smb_kshare_release().
655  */
656 smb_kshare_t *
657 smb_kshare_lookup(smb_server_t *sv, const char *shrname)
658 {
659 	smb_kshare_t key;
660 	smb_kshare_t *shr;
661 
662 	ASSERT(shrname);
663 
664 	if (!smb_export_isready(sv))
665 		return (NULL);
666 
667 	key.shr_name = (char *)shrname;
668 	shr = smb_avl_lookup(&sv->sv_export.e_share_avl, &key);
669 	return (shr);
670 }
671 
672 /*
673  * Releases the hold taken on the specified share object
674  */
675 void
676 smb_kshare_release(smb_server_t *sv, smb_kshare_t *shr)
677 {
678 	ASSERT(shr);
679 	ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);
680 
681 	smb_avl_release(&sv->sv_export.e_share_avl, shr);
682 }
683 
684 /*
685  * Add the given share to the specified server.
686  * If the share is a disk share, smb_vfs_hold() is
687  * invoked to ensure that there is a hold on the
688  * corresponding file system before the share is
689  * added to the shares AVL.
690  *
691  * If the share is an Autohome share and it is
692  * already in the AVL, only the reference count
693  * for that share is incremented.
694  */
695 static int
696 smb_kshare_export(smb_server_t *sv, smb_kshare_t *shr)
697 {
698 	smb_avl_t	*share_avl;
699 	smb_kshare_t	*auto_shr;
700 	vnode_t		*vp;
701 	int		rc = 0;
702 
703 	share_avl = &sv->sv_export.e_share_avl;
704 
705 	if (!STYPE_ISDSK(shr->shr_type)) {
706 		if ((rc = smb_avl_add(share_avl, shr)) != 0) {
707 			cmn_err(CE_WARN, "export[%s]: failed caching (%d)",
708 			    shr->shr_name, rc);
709 		}
710 
711 		return (rc);
712 	}
713 
714 	if ((auto_shr = smb_avl_lookup(share_avl, shr)) != NULL) {
715 		if ((auto_shr->shr_flags & SMB_SHRF_AUTOHOME) == 0) {
716 			smb_avl_release(share_avl, auto_shr);
717 			return (EEXIST);
718 		}
719 
720 		mutex_enter(&auto_shr->shr_mutex);
721 		auto_shr->shr_autocnt++;
722 		mutex_exit(&auto_shr->shr_mutex);
723 		smb_avl_release(share_avl, auto_shr);
724 		return (0);
725 	}
726 
727 	if ((rc = smb_server_sharevp(sv, shr->shr_path, &vp)) != 0) {
728 		cmn_err(CE_WARN, "export[%s(%s)]: failed obtaining vnode (%d)",
729 		    shr->shr_name, shr->shr_path, rc);
730 		return (rc);
731 	}
732 
733 	if ((rc = smb_vfs_hold(&sv->sv_export, vp->v_vfsp)) == 0) {
734 		if ((rc = smb_avl_add(share_avl, shr)) != 0) {
735 			cmn_err(CE_WARN, "export[%s]: failed caching (%d)",
736 			    shr->shr_name, rc);
737 			smb_vfs_rele(&sv->sv_export, vp->v_vfsp);
738 		}
739 	} else {
740 		cmn_err(CE_WARN, "export[%s(%s)]: failed holding VFS (%d)",
741 		    shr->shr_name, shr->shr_path, rc);
742 	}
743 
744 	VN_RELE(vp);
745 	return (rc);
746 }
747 
748 /*
749  * Removes the share specified by 'shrname' from the AVL
750  * tree of the given server if it's there.
751  *
752  * If the share is an Autohome share, the autohome count
753  * is decremented and the share is only removed if the
754  * count goes to zero.
755  *
756  * If the share is a disk share, the hold on the corresponding
757  * file system is released before removing the share from
758  * the AVL tree.
759  */
760 static int
761 smb_kshare_unexport(smb_server_t *sv, const char *shrname)
762 {
763 	smb_avl_t	*share_avl;
764 	smb_kshare_t	key;
765 	smb_kshare_t	*shr;
766 	vnode_t		*vp;
767 	int		rc;
768 	boolean_t	auto_unexport;
769 
770 	share_avl = &sv->sv_export.e_share_avl;
771 
772 	key.shr_name = (char *)shrname;
773 	if ((shr = smb_avl_lookup(share_avl, &key)) == NULL)
774 		return (ENOENT);
775 
776 	if ((shr->shr_flags & SMB_SHRF_AUTOHOME) != 0) {
777 		mutex_enter(&shr->shr_mutex);
778 		shr->shr_autocnt--;
779 		auto_unexport = (shr->shr_autocnt == 0);
780 		mutex_exit(&shr->shr_mutex);
781 		if (!auto_unexport) {
782 			smb_avl_release(share_avl, shr);
783 			return (0);
784 		}
785 	}
786 
787 	if (STYPE_ISDSK(shr->shr_type)) {
788 		if ((rc = smb_server_sharevp(sv, shr->shr_path, &vp)) != 0) {
789 			smb_avl_release(share_avl, shr);
790 			cmn_err(CE_WARN, "unexport[%s]: failed obtaining vnode"
791 			    " (%d)", shrname, rc);
792 			return (rc);
793 		}
794 
795 		smb_vfs_rele(&sv->sv_export, vp->v_vfsp);
796 		VN_RELE(vp);
797 	}
798 
799 	smb_avl_remove(share_avl, shr);
800 	smb_avl_release(share_avl, shr);
801 
802 	return (0);
803 }
804 
805 /*
806  * Exports IPC$ or Admin shares
807  */
808 static int
809 smb_kshare_export_trans(smb_server_t *sv, char *name, char *path, char *cmnt)
810 {
811 	smb_kshare_t *shr;
812 
813 	ASSERT(name);
814 	ASSERT(path);
815 
816 	shr = kmem_cache_alloc(smb_kshare_cache_share, KM_SLEEP);
817 	bzero(shr, sizeof (smb_kshare_t));
818 
819 	shr->shr_magic = SMB_SHARE_MAGIC;
820 	shr->shr_refcnt = 1;
821 	shr->shr_flags = SMB_SHRF_TRANS | smb_kshare_is_admin(name);
822 	if (strcasecmp(name, "IPC$") == 0)
823 		shr->shr_type = STYPE_IPC;
824 	else
825 		shr->shr_type = STYPE_DISKTREE;
826 
827 	shr->shr_type |= smb_kshare_is_special(name);
828 
829 	shr->shr_name = smb_mem_strdup(name);
830 	if (path)
831 		shr->shr_path = smb_mem_strdup(path);
832 	if (cmnt)
833 		shr->shr_cmnt = smb_mem_strdup(cmnt);
834 	shr->shr_oemname = smb_kshare_oemname(name);
835 
836 	return (smb_kshare_export(sv, shr));
837 }
838 
839 /*
840  * Decodes share information in an nvlist format into a smb_kshare_t
841  * structure.
842  *
843  * This is a temporary function and will be replaced by functions
844  * provided by libsharev2 code after it's available.
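 *
 * The share nvlist is expected to look roughly like this (only the
 * properties consumed below are shown; the values are illustrative):
 *
 *	name = "<share name>"
 *	path = "<share path>"
 *	desc = "<comment>"
 *	smb = {
 *		type, SHOPT_AD_CONTAINER, SHOPT_NONE, SHOPT_RO, SHOPT_RW,
 *		SHOPT_ABE, SHOPT_CATIA, SHOPT_GUEST, SHOPT_DFSROOT,
 *		"Autohome", "uid", "gid", SHOPT_CSC, ...
 *	}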
845  */
846 static smb_kshare_t *
847 smb_kshare_decode(nvlist_t *share)
848 {
849 	smb_kshare_t tmp;
850 	smb_kshare_t *shr;
851 	nvlist_t *smb;
852 	char *csc_name = NULL;
853 	int rc;
854 
855 	ASSERT(share);
856 
857 	bzero(&tmp, sizeof (smb_kshare_t));
858 
859 	rc = nvlist_lookup_string(share, "name", &tmp.shr_name);
860 	rc |= nvlist_lookup_string(share, "path", &tmp.shr_path);
861 	(void) nvlist_lookup_string(share, "desc", &tmp.shr_cmnt);
862 
863 	ASSERT(tmp.shr_name && tmp.shr_path);
864 
865 	rc |= nvlist_lookup_nvlist(share, "smb", &smb);
866 	if (rc != 0) {
867 		cmn_err(CE_WARN, "kshare: failed looking up SMB properties"
868 		    " (%d)", rc);
869 		return (NULL);
870 	}
871 
872 	rc = nvlist_lookup_uint32(smb, "type", &tmp.shr_type);
873 	if (rc != 0) {
874 		cmn_err(CE_WARN, "kshare[%s]: failed getting the share type"
875 		    " (%d)", tmp.shr_name, rc);
876 		return (NULL);
877 	}
878 
879 	(void) nvlist_lookup_string(smb, SHOPT_AD_CONTAINER,
880 	    &tmp.shr_container);
881 	(void) nvlist_lookup_string(smb, SHOPT_NONE, &tmp.shr_access_none);
882 	(void) nvlist_lookup_string(smb, SHOPT_RO, &tmp.shr_access_ro);
883 	(void) nvlist_lookup_string(smb, SHOPT_RW, &tmp.shr_access_rw);
884 
885 	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_ABE, SMB_SHRF_ABE);
886 	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_CATIA,
887 	    SMB_SHRF_CATIA);
888 	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_GUEST,
889 	    SMB_SHRF_GUEST_OK);
890 	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_DFSROOT,
891 	    SMB_SHRF_DFSROOT);
892 	tmp.shr_flags |= smb_kshare_decode_bool(smb, "Autohome",
893 	    SMB_SHRF_AUTOHOME);
894 
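	/*
	 * Autohome shares carry the owner's uid/gid so the share can
	 * be matched to the owning user (see smb_kshare_enum).
	 */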
895 	if ((tmp.shr_flags & SMB_SHRF_AUTOHOME) == SMB_SHRF_AUTOHOME) {
896 		rc = nvlist_lookup_uint32(smb, "uid", &tmp.shr_uid);
897 		rc |= nvlist_lookup_uint32(smb, "gid", &tmp.shr_gid);
898 		if (rc != 0) {
899 			cmn_err(CE_WARN, "kshare: failed looking up uid/gid"
900 			    " (%d)", rc);
901 			return (NULL);
902 		}
903 	}
904 
905 	(void) nvlist_lookup_string(smb, SHOPT_CSC, &csc_name);
906 	smb_kshare_csc_flags(&tmp, csc_name);
907 
908 	shr = kmem_cache_alloc(smb_kshare_cache_share, KM_SLEEP);
909 	bzero(shr, sizeof (smb_kshare_t));
910 
911 	shr->shr_magic = SMB_SHARE_MAGIC;
912 	shr->shr_refcnt = 1;
913 
914 	shr->shr_name = smb_mem_strdup(tmp.shr_name);
915 	shr->shr_path = smb_mem_strdup(tmp.shr_path);
916 	if (tmp.shr_cmnt)
917 		shr->shr_cmnt = smb_mem_strdup(tmp.shr_cmnt);
918 	if (tmp.shr_container)
919 		shr->shr_container = smb_mem_strdup(tmp.shr_container);
920 	if (tmp.shr_access_none)
921 		shr->shr_access_none = smb_mem_strdup(tmp.shr_access_none);
922 	if (tmp.shr_access_ro)
923 		shr->shr_access_ro = smb_mem_strdup(tmp.shr_access_ro);
924 	if (tmp.shr_access_rw)
925 		shr->shr_access_rw = smb_mem_strdup(tmp.shr_access_rw);
926 
927 	shr->shr_oemname = smb_kshare_oemname(shr->shr_name);
928 	shr->shr_flags = tmp.shr_flags | smb_kshare_is_admin(shr->shr_name);
929 	shr->shr_type = tmp.shr_type | smb_kshare_is_special(shr->shr_name);
930 
931 	shr->shr_uid = tmp.shr_uid;
932 	shr->shr_gid = tmp.shr_gid;
933 
934 	if ((shr->shr_flags & SMB_SHRF_AUTOHOME) == SMB_SHRF_AUTOHOME)
935 		shr->shr_autocnt = 1;
936 
937 	return (shr);
938 }
939 
940 #if 0
941 static void
942 smb_kshare_log(smb_kshare_t *shr)
943 {
944 	cmn_err(CE_NOTE, "Share info:");
945 	cmn_err(CE_NOTE, "\tname: %s", (shr->shr_name) ? shr->shr_name : "");
946 	cmn_err(CE_NOTE, "\tpath: %s", (shr->shr_path) ? shr->shr_path : "");
947 	cmn_err(CE_NOTE, "\tcmnt: (%s)",
948 	    (shr->shr_cmnt) ? shr->shr_cmnt : "NULL");
949 	cmn_err(CE_NOTE, "\toemname: (%s)",
950 	    (shr->shr_oemname) ? shr->shr_oemname : "NULL");
951 	cmn_err(CE_NOTE, "\tflags: %X", shr->shr_flags);
952 	cmn_err(CE_NOTE, "\ttype: %d", shr->shr_type);
953 }
954 #endif
955 
956 /*
957  * Compare function used by shares AVL
958  */
959 static int
960 smb_kshare_cmp(const void *p1, const void *p2)
961 {
962 	smb_kshare_t *shr1 = (smb_kshare_t *)p1;
963 	smb_kshare_t *shr2 = (smb_kshare_t *)p2;
964 	int rc;
965 
966 	ASSERT(shr1);
967 	ASSERT(shr1->shr_name);
968 
969 	ASSERT(shr2);
970 	ASSERT(shr2->shr_name);
971 
972 	rc = smb_strcasecmp(shr1->shr_name, shr2->shr_name, 0);
973 
974 	if (rc < 0)
975 		return (-1);
976 
977 	if (rc > 0)
978 		return (1);
979 
980 	return (0);
981 }
982 
983 /*
984  * This function is called by smb_avl routines whenever
985  * there is a need to take a hold on a share structure
986  * inside the AVL.
987  */
988 static void
989 smb_kshare_hold(const void *p)
990 {
991 	smb_kshare_t *shr = (smb_kshare_t *)p;
992 
993 	ASSERT(shr);
994 	ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);
995 
996 	mutex_enter(&shr->shr_mutex);
997 	shr->shr_refcnt++;
998 	mutex_exit(&shr->shr_mutex);
999 }
1000 
1001 /*
1002  * This function must be called by smb_avl routines whenever
1003  * smb_kshare_hold is called and the hold needs to be released.
1004  */
1005 static boolean_t
1006 smb_kshare_rele(const void *p)
1007 {
1008 	smb_kshare_t *shr = (smb_kshare_t *)p;
1009 	boolean_t destroy;
1010 
1011 	ASSERT(shr);
1012 	ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);
1013 
1014 	mutex_enter(&shr->shr_mutex);
1015 	ASSERT(shr->shr_refcnt > 0);
1016 	shr->shr_refcnt--;
1017 	destroy = (shr->shr_refcnt == 0);
1018 	mutex_exit(&shr->shr_mutex);
1019 
1020 	return (destroy);
1021 }
1022 
1023 /*
1024  * Frees all the memory allocated for the given
1025  * share structure. It also removes the structure
1026  * from the share cache.
1027  */
1028 static void
1029 smb_kshare_destroy(void *p)
1030 {
1031 	smb_kshare_t *shr = (smb_kshare_t *)p;
1032 
1033 	ASSERT(shr);
1034 	ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);
1035 
1036 	smb_mem_free(shr->shr_name);
1037 	smb_mem_free(shr->shr_path);
1038 	smb_mem_free(shr->shr_cmnt);
1039 	smb_mem_free(shr->shr_container);
1040 	smb_mem_free(shr->shr_oemname);
1041 	smb_mem_free(shr->shr_access_none);
1042 	smb_mem_free(shr->shr_access_ro);
1043 	smb_mem_free(shr->shr_access_rw);
1044 
1045 	kmem_cache_free(smb_kshare_cache_share, shr);
1046 }
1047 
1048 
1049 /*
1050  * Generate an OEM name for the given share name.  If the name is
1051  * shorter than 13 bytes, the oemname is returned; otherwise NULL
1052  * is returned.
1053  */
1054 static char *
1055 smb_kshare_oemname(const char *shrname)
1056 {
1057 	smb_wchar_t *unibuf;
1058 	char *oem_name;
1059 	int length;
1060 
1061 	length = strlen(shrname) + 1;
1062 
1063 	oem_name = smb_mem_alloc(length);
1064 	unibuf = smb_mem_alloc(length * sizeof (smb_wchar_t));
1065 
1066 	(void) smb_mbstowcs(unibuf, shrname, length);
1067 
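	/* If OEM conversion yields nothing, fall back to the share name. */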
1068 	if (ucstooem(oem_name, unibuf, length, OEM_CPG_850) == 0)
1069 		(void) strcpy(oem_name, shrname);
1070 
1071 	smb_mem_free(unibuf);
1072 
1073 	if (strlen(oem_name) + 1 > SMB_SHARE_OEMNAME_MAX) {
1074 		smb_mem_free(oem_name);
1075 		return (NULL);
1076 	}
1077 
1078 	return (oem_name);
1079 }
1080 
1081 /*
1082  * Special share reserved for interprocess communication (IPC$) or
1083  * remote administration of the server (ADMIN$). Can also refer to
1084  * administrative shares such as C$, D$, E$, and so forth.
1085  */
1086 static int
1087 smb_kshare_is_special(const char *sharename)
1088 {
1089 	int len;
1090 
1091 	if (sharename == NULL)
1092 		return (0);
1093 
1094 	if ((len = strlen(sharename)) == 0)
1095 		return (0);
1096 
1097 	if (sharename[len - 1] == '$')
1098 		return (STYPE_SPECIAL);
1099 
1100 	return (0);
1101 }
1102 
1103 /*
1104  * Check whether or not this is a default admin share: C$, D$ etc.
1105  */
1106 static boolean_t
1107 smb_kshare_is_admin(const char *sharename)
1108 {
1109 	if (sharename == NULL)
1110 		return (B_FALSE);
1111 
1112 	if (strlen(sharename) == 2 &&
1113 	    smb_isalpha(sharename[0]) && sharename[1] == '$') {
1114 		return (B_TRUE);
1115 	}
1116 
1117 	return (B_FALSE);
1118 }
1119 
1120 /*
1121  * Decodes the given boolean share option.
1122  * If the option is present in the nvlist and its value is true,
1123  * the corresponding flag value is returned; otherwise 0 is returned.
1124  */
1125 static uint32_t
1126 smb_kshare_decode_bool(nvlist_t *nvl, const char *propname, uint32_t flag)
1127 {
1128 	char *boolp;
1129 
1130 	if (nvlist_lookup_string(nvl, propname, &boolp) == 0)
1131 		if (strcasecmp(boolp, "true") == 0)
1132 			return (flag);
1133 
1134 	return (0);
1135 }
1136 
1137 /*
1138  * Map a client-side caching (CSC) option to the appropriate share
1139  * flag.  Only one option is allowed; an error will be logged if
1140  * multiple options have been specified.  We don't need to do anything
1141  * about multiple values here because the SRVSVC will not recognize
1142  * a value containing multiple flags and will return the default value.
1143  *
1144  * If the option value is not recognized, it will be ignored: invalid
1145  * values will typically be caught and rejected by sharemgr.
1146  */
1147 static void
1148 smb_kshare_csc_flags(smb_kshare_t *shr, const char *value)
1149 {
1150 	int i;
1151 	static struct {
1152 		char *value;
1153 		uint32_t flag;
1154 	} cscopt[] = {
1155 		{ "disabled",	SMB_SHRF_CSC_DISABLED },
1156 		{ "manual",	SMB_SHRF_CSC_MANUAL },
1157 		{ "auto",	SMB_SHRF_CSC_AUTO },
1158 		{ "vdo",	SMB_SHRF_CSC_VDO }
1159 	};
1160 
1161 	if (value == NULL)
1162 		return;
1163 
1164 	for (i = 0; i < (sizeof (cscopt) / sizeof (cscopt[0])); ++i) {
1165 		if (strcasecmp(value, cscopt[i].value) == 0) {
1166 			shr->shr_flags |= cscopt[i].flag;
1167 			break;
1168 		}
1169 	}
1170 
1171 	switch (shr->shr_flags & SMB_SHRF_CSC_MASK) {
1172 	case 0:
1173 	case SMB_SHRF_CSC_DISABLED:
1174 	case SMB_SHRF_CSC_MANUAL:
1175 	case SMB_SHRF_CSC_AUTO:
1176 	case SMB_SHRF_CSC_VDO:
1177 		break;
1178 
1179 	default:
1180 		cmn_err(CE_NOTE, "csc option conflict: 0x%08x",
1181 		    shr->shr_flags & SMB_SHRF_CSC_MASK);
1182 		break;
1183 	}
1184 }
1185 
1186 /*
1187  * This function processes the unexport event list and disconnects shares
1188  * asynchronously.  The function executes as a zone-specific thread.
1189  *
1190  * The server arg passed in is safe to use without a reference count, because
1191  * the server cannot be deleted until smb_thread_stop()/destroy() return,
1192  * which is also when the thread exits.
1193  */
1194 /*ARGSUSED*/
1195 static void
1196 smb_kshare_unexport_thread(smb_thread_t *thread, void *arg)
1197 {
1198 	smb_server_t	*sv = arg;
1199 	smb_unshare_t	*ux;
1200 
1201 	while (smb_thread_continue(thread)) {
1202 		while ((ux = list_head(&sv->sv_export.e_unexport_list.sl_list))
1203 		    != NULL) {
1204 			smb_slist_remove(&sv->sv_export.e_unexport_list, ux);
1205 			(void) smb_server_unshare(ux->us_sharename);
1206 			kmem_cache_free(smb_kshare_cache_unexport, ux);
1207 		}
1208 	}
1209 }
1210 
1211 static boolean_t
1212 smb_export_isready(smb_server_t *sv)
1213 {
1214 	boolean_t ready;
1215 
1216 	mutex_enter(&sv->sv_export.e_mutex);
1217 	ready = sv->sv_export.e_ready;
1218 	mutex_exit(&sv->sv_export.e_mutex);
1219 
1220 	return (ready);
1221 }
1222 
1223 #ifdef	_KERNEL
1224 /*
1225  * Return 0 upon success; otherwise a value > 0.
1226  */
1227 static int
1228 smb_kshare_chk_dsrv_status(int opcode, smb_dr_ctx_t *dec_ctx)
1229 {
1230 	int status = smb_dr_get_int32(dec_ctx);
1231 	int err;
1232 
1233 	switch (status) {
1234 	case SMB_SHARE_DSUCCESS:
1235 		return (0);
1236 
1237 	case SMB_SHARE_DERROR:
1238 		err = smb_dr_get_uint32(dec_ctx);
1239 		cmn_err(CE_WARN, "%d: Encountered door server error %d",
1240 		    opcode, err);
1241 		(void) smb_dr_decode_finish(dec_ctx);
1242 		return (err);
1243 	}
1244 
1245 	ASSERT(0);
1246 	return (EINVAL);
1247 }
1248 #endif	/* _KERNEL */
1249