xref: /illumos-gate/usr/src/uts/common/fs/smbsrv/smb_kshare.c (revision 59d65d3175825093531e82f44269d948ed510a00)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2017 Nexenta Systems, Inc.  All rights reserved.
25  * Copyright 2017 Joyent, Inc.
26  * Copyright 2022 RackTop Systems.
27  */
28 
29 #include <smbsrv/smb_door.h>
30 #include <smbsrv/smb_ktypes.h>
31 #include <smbsrv/smb2_kproto.h>
32 #include <smbsrv/smb_kstat.h>
33 
34 typedef struct smb_unshare {
35 	list_node_t	us_lnd;
36 	char		us_sharename[MAXNAMELEN];
37 } smb_unshare_t;
38 
39 static kmem_cache_t	*smb_kshare_cache_share;
40 static kmem_cache_t	*smb_kshare_cache_unexport;
41 
42 static int smb_kshare_cmp(const void *, const void *);
43 static void smb_kshare_hold(const void *);
44 static boolean_t smb_kshare_rele(const void *);
45 static void smb_kshare_destroy(void *);
46 static char *smb_kshare_oemname(const char *);
47 static int smb_kshare_is_special(const char *);
48 static boolean_t smb_kshare_is_admin(const char *);
49 static smb_kshare_t *smb_kshare_decode(nvlist_t *);
50 static uint32_t smb_kshare_decode_bool(nvlist_t *, const char *, uint32_t);
51 static void smb_kshare_unexport_thread(smb_thread_t *, void *);
52 static int smb_kshare_export(smb_server_t *, smb_kshare_t *);
53 static int smb_kshare_unexport(smb_server_t *, const char *);
54 static int smb_kshare_export_trans(smb_server_t *, char *, char *, char *);
55 static void smb_kshare_csc_flags(smb_kshare_t *, const char *);
56 
57 static boolean_t smb_export_isready(smb_server_t *);
58 
59 #ifdef	_KERNEL
60 static int smb_kshare_chk_dsrv_status(int, smb_dr_ctx_t *);
61 #endif	/* _KERNEL */
62 
63 static const smb_avl_nops_t smb_kshare_avlops = {
64 	smb_kshare_cmp,
65 	smb_kshare_hold,
66 	smb_kshare_rele,
67 	smb_kshare_destroy
68 };
69 
70 #ifdef	_KERNEL
71 /*
72  * This function is not multithread-safe. The caller has to make sure
73  * that only one thread calls this function at a time.
74  */
75 door_handle_t
76 smb_kshare_door_init(int door_id)
77 {
78 	return (door_ki_lookup(door_id));
79 }
80 
81 /*
82  * This function is not multithread-safe. The caller has to make sure
83  * that only one thread calls this function at a time.
84  */
85 void
86 smb_kshare_door_fini(door_handle_t dhdl)
87 {
88 	if (dhdl)
89 		door_ki_rele(dhdl);
90 }
91 
92 /*
93  * This is a special interface used by ZFS to cause
94  * a share to be added or removed.
95  *
96  * arg is either an smb_share_t or a share name from userspace.
97  * It must be copied into the kernel.  It is an smb_share_t
98  * for add operations and a share name for delete operations.
99  */
100 int
101 smb_kshare_upcall(door_handle_t dhdl, void *arg, boolean_t add_share)
102 {
103 	door_arg_t	doorarg = { 0 };
104 	char		*buf = NULL;
105 	char		*str = NULL;
106 	int		error;
107 	int		rc;
108 	unsigned int	used;
109 	smb_dr_ctx_t	*dec_ctx;
110 	smb_dr_ctx_t	*enc_ctx;
111 	smb_share_t	*lmshare = NULL;
112 	int		opcode;
113 
114 	opcode = (add_share) ? SMB_SHROP_ADD : SMB_SHROP_DELETE;
115 
116 	buf = kmem_alloc(SMB_SHARE_DSIZE, KM_SLEEP);
117 	enc_ctx = smb_dr_encode_start(buf, SMB_SHARE_DSIZE);
118 	smb_dr_put_uint32(enc_ctx, opcode);
119 
120 	switch (opcode) {
121 	case SMB_SHROP_ADD:
122 		lmshare = kmem_alloc(sizeof (smb_share_t), KM_SLEEP);
123 		error = xcopyin(arg, lmshare, sizeof (smb_share_t));
124 		if (error != 0) {
125 			kmem_free(lmshare, sizeof (smb_share_t));
126 			kmem_free(buf, SMB_SHARE_DSIZE);
127 			return (error);
128 		}
129 		smb_dr_put_share(enc_ctx, lmshare);
130 		break;
131 
132 	case SMB_SHROP_DELETE:
133 		str = kmem_alloc(MAXPATHLEN, KM_SLEEP);
134 		error = copyinstr(arg, str, MAXPATHLEN, NULL);
135 		if (error != 0) {
136 			kmem_free(str, MAXPATHLEN);
137 			kmem_free(buf, SMB_SHARE_DSIZE);
138 			return (error);
139 		}
140 		smb_dr_put_string(enc_ctx, str);
141 		kmem_free(str, MAXPATHLEN);
142 		break;
143 	}
144 
145 	if ((error = smb_dr_encode_finish(enc_ctx, &used)) != 0) {
146 		kmem_free(buf, SMB_SHARE_DSIZE);
147 		if (lmshare)
148 			kmem_free(lmshare, sizeof (smb_share_t));
149 		return (NERR_InternalError);
150 	}
151 
152 	doorarg.data_ptr = buf;
153 	doorarg.data_size = used;
154 	doorarg.rbuf = buf;
155 	doorarg.rsize = SMB_SHARE_DSIZE;
156 
157 	error = door_ki_upcall_limited(dhdl, &doorarg, NULL, SIZE_MAX, 0);
158 
159 	if (error) {
160 		kmem_free(buf, SMB_SHARE_DSIZE);
161 		if (lmshare)
162 			kmem_free(lmshare, sizeof (smb_share_t));
163 		return (error);
164 	}
165 
166 	dec_ctx = smb_dr_decode_start(doorarg.data_ptr, doorarg.data_size);
167 	if (smb_kshare_chk_dsrv_status(opcode, dec_ctx) != 0) {
168 		kmem_free(buf, SMB_SHARE_DSIZE);
169 		if (lmshare)
170 			kmem_free(lmshare, sizeof (smb_share_t));
171 		return (NERR_InternalError);
172 	}
173 
174 	rc = smb_dr_get_uint32(dec_ctx);
175 	if (opcode == SMB_SHROP_ADD)
176 		smb_dr_get_share(dec_ctx, lmshare);
177 
178 	if (smb_dr_decode_finish(dec_ctx))
179 		rc = NERR_InternalError;
180 
181 	kmem_free(buf, SMB_SHARE_DSIZE);
182 	if (lmshare)
183 		kmem_free(lmshare, sizeof (smb_share_t));
184 
185 	return ((rc == NERR_DuplicateShare && add_share) ? 0 : rc);
186 }
187 #endif	/* _KERNEL */
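/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * in-kernel caller of the upcall interface above would look roughly like
 * this, using only functions defined in this file:
 *
 *	door_handle_t dhdl = smb_kshare_door_init(door_id);
 *	if (dhdl != NULL) {
 *		rc = smb_kshare_upcall(dhdl, arg, add_share);
 *		smb_kshare_door_fini(dhdl);
 *	}
 *
 * where arg points to a userspace smb_share_t when add_share is B_TRUE
 * (SMB_SHROP_ADD) and to a userspace share name string when add_share is
 * B_FALSE (SMB_SHROP_DELETE).
 */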
188 
189 /*
190  * Executes the map and unmap commands for shares.
191  */
192 int
193 smb_kshare_exec(smb_server_t *sv, smb_shr_execinfo_t *execinfo)
194 {
195 	int exec_rc = 0;
196 
197 	(void) smb_kdoor_upcall(sv, SMB_DR_SHR_EXEC,
198 	    execinfo, smb_shr_execinfo_xdr, &exec_rc, xdr_int);
199 
200 	return (exec_rc);
201 }
202 
203 /*
204  * Obtains any host access restriction on the specified
205  * share for the given host (ipaddr) by calling smbd
206  */
207 uint32_t
208 smb_kshare_hostaccess(smb_kshare_t *shr, smb_session_t *session)
209 {
210 	smb_shr_hostaccess_query_t req;
211 	smb_inaddr_t *ipaddr = &session->ipaddr;
212 	uint32_t host_access = SMB_SHRF_ACC_OPEN;
213 	uint32_t flag = SMB_SHRF_ACC_OPEN;
214 	uint32_t access;
215 
216 	if (smb_inet_iszero(ipaddr))
217 		return (ACE_ALL_PERMS);
218 
219 	if ((shr->shr_access_none == NULL || *shr->shr_access_none == '\0') &&
220 	    (shr->shr_access_ro == NULL || *shr->shr_access_ro == '\0') &&
221 	    (shr->shr_access_rw == NULL || *shr->shr_access_rw == '\0'))
222 		return (ACE_ALL_PERMS);
223 
224 	if (shr->shr_access_none != NULL)
225 		flag |= SMB_SHRF_ACC_NONE;
226 	if (shr->shr_access_ro != NULL)
227 		flag |= SMB_SHRF_ACC_RO;
228 	if (shr->shr_access_rw != NULL)
229 		flag |= SMB_SHRF_ACC_RW;
230 
231 	req.shq_none = shr->shr_access_none;
232 	req.shq_ro = shr->shr_access_ro;
233 	req.shq_rw = shr->shr_access_rw;
234 	req.shq_flag = flag;
235 	req.shq_ipaddr = *ipaddr;
236 
237 	(void) smb_kdoor_upcall(session->s_server, SMB_DR_SHR_HOSTACCESS,
238 	    &req, smb_shr_hostaccess_query_xdr, &host_access, xdr_uint32_t);
239 
240 	switch (host_access) {
241 	case SMB_SHRF_ACC_RO:
242 		access = ACE_ALL_PERMS & ~ACE_ALL_WRITE_PERMS;
243 		break;
244 	case SMB_SHRF_ACC_OPEN:
245 	case SMB_SHRF_ACC_RW:
246 		access = ACE_ALL_PERMS;
247 		break;
248 	case SMB_SHRF_ACC_NONE:
249 	default:
250 		access = 0;
251 	}
252 
253 	return (access);
254 }
255 
256 /*
257  * This function is called when smb_server_t is
258  * created, which means smb/service is ready for
259  * exporting SMB shares.
260  */
261 void
262 smb_export_start(smb_server_t *sv)
263 {
264 	mutex_enter(&sv->sv_export.e_mutex);
265 	if (sv->sv_export.e_ready) {
266 		mutex_exit(&sv->sv_export.e_mutex);
267 		return;
268 	}
269 
270 	sv->sv_export.e_ready = B_TRUE;
271 	mutex_exit(&sv->sv_export.e_mutex);
272 
273 	smb_avl_create(&sv->sv_export.e_share_avl, sizeof (smb_kshare_t),
274 	    offsetof(smb_kshare_t, shr_link), &smb_kshare_avlops);
275 
276 	(void) smb_kshare_export_trans(sv, "IPC$", "IPC$", "Remote IPC");
277 	(void) smb_kshare_export_trans(sv, "c$", SMB_CVOL, "Default Share");
278 	(void) smb_kshare_export_trans(sv, "vss$", SMB_VSS, "VSS");
279 }
280 
281 /*
282  * This function is called when smb_server_t goes
283  * away, which means SMB shares should not be made
284  * available to clients.
285  */
286 void
287 smb_export_stop(smb_server_t *sv)
288 {
289 	mutex_enter(&sv->sv_export.e_mutex);
290 	if (!sv->sv_export.e_ready) {
291 		mutex_exit(&sv->sv_export.e_mutex);
292 		return;
293 	}
294 	sv->sv_export.e_ready = B_FALSE;
295 	mutex_exit(&sv->sv_export.e_mutex);
296 
297 	smb_avl_destroy(&sv->sv_export.e_share_avl);
298 }
299 
300 void
301 smb_kshare_g_init(void)
302 {
303 	smb_kshare_cache_share = kmem_cache_create("smb_share_cache",
304 	    sizeof (smb_kshare_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
305 
306 	smb_kshare_cache_unexport = kmem_cache_create("smb_unexport_cache",
307 	    sizeof (smb_unshare_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
308 }
309 
310 void
311 smb_kshare_init(smb_server_t *sv)
312 {
313 
314 	smb_slist_constructor(&sv->sv_export.e_unexport_list,
315 	    sizeof (smb_unshare_t), offsetof(smb_unshare_t, us_lnd));
316 }
317 
318 int
319 smb_kshare_start(smb_server_t *sv)
320 {
321 	smb_thread_init(&sv->sv_export.e_unexport_thread, "smb_kshare_unexport",
322 	    smb_kshare_unexport_thread, sv, smbsrv_base_pri);
323 
324 	return (smb_thread_start(&sv->sv_export.e_unexport_thread));
325 }
326 
327 void
328 smb_kshare_stop(smb_server_t *sv)
329 {
330 	smb_thread_stop(&sv->sv_export.e_unexport_thread);
331 	smb_thread_destroy(&sv->sv_export.e_unexport_thread);
332 }
333 
334 void
335 smb_kshare_fini(smb_server_t *sv)
336 {
337 	smb_unshare_t *ux;
338 
339 	while ((ux = list_head(&sv->sv_export.e_unexport_list.sl_list))
340 	    != NULL) {
341 		smb_slist_remove(&sv->sv_export.e_unexport_list, ux);
342 		kmem_cache_free(smb_kshare_cache_unexport, ux);
343 	}
344 	smb_slist_destructor(&sv->sv_export.e_unexport_list);
345 }
346 
347 void
348 smb_kshare_g_fini(void)
349 {
350 	kmem_cache_destroy(smb_kshare_cache_unexport);
351 	kmem_cache_destroy(smb_kshare_cache_share);
352 }
353 
354 /*
355  * A list of shares in nvlist format can be sent down
356  * from userspace through the IOCTL interface. The nvlist
357  * is unpacked here and all the shares in the list will
358  * be exported.
359  */
360 int
361 smb_kshare_export_list(smb_ioc_share_t *ioc)
362 {
363 	smb_server_t	*sv = NULL;
364 	nvlist_t	*shrlist = NULL;
365 	nvlist_t	 *share;
366 	nvpair_t	 *nvp;
367 	smb_kshare_t	 *shr;
368 	char		*shrname;
369 	int		rc;
370 
371 	if ((rc = smb_server_lookup(&sv)) != 0)
372 		return (rc);
373 
374 	if (!smb_export_isready(sv)) {
375 		rc = ENOTACTIVE;
376 		goto out;
377 	}
378 
379 	/*
380 	 * Reality check that the nvlist's reported length doesn't exceed the
381 	 * ioctl's total length.  We then assume the nvlist_unpack() will
382 	 * sanity check the nvlist itself.
383 	 */
384 	if ((ioc->shrlen + offsetof(smb_ioc_share_t, shr)) > ioc->hdr.len) {
385 		rc = EINVAL;
386 		goto out;
387 	}
388 	rc = nvlist_unpack(ioc->shr, ioc->shrlen, &shrlist, KM_SLEEP);
389 	if (rc != 0)
390 		goto out;
391 
392 	for (nvp = nvlist_next_nvpair(shrlist, NULL); nvp != NULL;
393 	    nvp = nvlist_next_nvpair(shrlist, nvp)) {
394 
395 		/*
396 		 * Since this loop can run for a while we want to exit
397 		 * as soon as the server state is anything but RUNNING
398 		 * to allow shutdown to proceed.
399 		 */
400 		if (sv->sv_state != SMB_SERVER_STATE_RUNNING)
401 			goto out;
402 
403 		if (nvpair_type(nvp) != DATA_TYPE_NVLIST)
404 			continue;
405 
406 		shrname = nvpair_name(nvp);
407 		ASSERT(shrname);
408 
409 		if ((rc = nvpair_value_nvlist(nvp, &share)) != 0) {
410 			cmn_err(CE_WARN, "export[%s]: failed accessing",
411 			    shrname);
412 			continue;
413 		}
414 
415 		if ((shr = smb_kshare_decode(share)) == NULL) {
416 			cmn_err(CE_WARN, "export[%s]: failed decoding",
417 			    shrname);
418 			continue;
419 		}
420 
421 		/* smb_kshare_export consumes shr so it's not leaked */
422 		if ((rc = smb_kshare_export(sv, shr)) != 0) {
423 			smb_kshare_destroy(shr);
424 			continue;
425 		}
426 	}
427 	rc = 0;
428 
429 out:
430 	nvlist_free(shrlist);
431 	smb_server_release(sv);
432 	return (rc);
433 }
434 
435 /*
436  * This function is invoked when a share is disabled to disconnect trees
437  * and close files.  Cleaning up may involve VOP and/or VFS calls, which
438  * may conflict/deadlock with stuck threads if something is amiss with the
439  * file system.  Queueing the request for asynchronous processing allows the
440  * call to return immediately so that, if the unshare is being done in the
441  * context of a forced unmount, the forced unmount will always be able to
442  * proceed (unblocking stuck I/O and eventually allowing all blocked unshare
443  * processes to complete).
444  *
445  * The path lookup to find the root vnode of the VFS in question and the
446  * release of this vnode are done synchronously prior to any associated
447  * unmount.  Doing these asynchronously to an associated unmount could run
448  * the risk of a spurious EBUSY for a standard unmount or an EIO during
449  * the path lookup due to a forced unmount finishing first.
450  */
451 int
452 smb_kshare_unexport_list(smb_ioc_share_t *ioc)
453 {
454 	smb_server_t	*sv = NULL;
455 	smb_unshare_t	*ux;
456 	nvlist_t	*shrlist = NULL;
457 	nvpair_t	*nvp;
458 	boolean_t	unexport = B_FALSE;
459 	char		*shrname;
460 	int		rc;
461 
462 	if ((rc = smb_server_lookup(&sv)) != 0)
463 		return (rc);
464 
465 	/*
466 	 * Reality check that the nvlist's reported length doesn't exceed the
467 	 * ioctl's total length.  We then assume the nvlist_unpack() will
468 	 * sanity check the nvlist itself.
469 	 */
470 	if ((ioc->shrlen + offsetof(smb_ioc_share_t, shr)) > ioc->hdr.len) {
471 		rc = EINVAL;
472 		goto out;
473 	}
474 	if ((rc = nvlist_unpack(ioc->shr, ioc->shrlen, &shrlist, 0)) != 0)
475 		goto out;
476 
477 	for (nvp = nvlist_next_nvpair(shrlist, NULL); nvp != NULL;
478 	    nvp = nvlist_next_nvpair(shrlist, nvp)) {
479 		if (nvpair_type(nvp) != DATA_TYPE_NVLIST)
480 			continue;
481 
482 		shrname = nvpair_name(nvp);
483 		ASSERT(shrname);
484 
485 		if ((rc = smb_kshare_unexport(sv, shrname)) != 0)
486 			continue;
487 
488 		ux = kmem_cache_alloc(smb_kshare_cache_unexport, KM_SLEEP);
489 		(void) strlcpy(ux->us_sharename, shrname, MAXNAMELEN);
490 
491 		smb_slist_insert_tail(&sv->sv_export.e_unexport_list, ux);
492 		unexport = B_TRUE;
493 	}
494 
495 	if (unexport)
496 		smb_thread_signal(&sv->sv_export.e_unexport_thread);
497 	rc = 0;
498 
499 out:
500 	nvlist_free(shrlist);
501 	smb_server_release(sv);
502 	return (rc);
503 }
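/*
 * Informal summary of the asynchronous unshare flow above (a sketch,
 * not additional code from the original source):
 *
 *	smb_kshare_unexport_list()		ioctl context
 *	    -> smb_kshare_unexport()		remove share from the AVL
 *	    -> queue smb_unshare_t, signal e_unexport_thread
 *
 *	smb_kshare_unexport_thread()		per-zone worker thread
 *	    -> smb_server_unshare()		disconnect trees, close files
 */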
504 
505 /*
506  * Get properties (currently only shortname enablement)
507  * of specified share.
508  */
509 int
510 smb_kshare_info(smb_ioc_shareinfo_t *ioc)
511 {
512 	smb_server_t	*sv;
513 	int		rc;
514 
515 	if ((rc = smb_server_lookup(&sv)) == 0) {
516 		ioc->shortnames = sv->sv_cfg.skc_short_names;
517 		smb_server_release(sv);
518 	}
519 	return (rc);
520 }
521 
522 /*
523  * This function builds a response for a NetShareEnum RAP request.
524  * The list of shares is scanned twice. In the first round, the total number
525  * of shares whose OEM name is shorter than 13 chars (esi->es_ntotal) and
526  * the number of shares that fit in the given buffer are calculated.
527  * In the second round the share data are encoded into the buffer.
528  *
529  * The data associated with each share has two parts: a fixed size part and
530  * a variable size part, which is the share's comment. The response buffer is
531  * laid out so that the fixed parts for all the shares appear first, followed
532  * by the comments for all those shares; that is why the data cannot be
533  * encoded in one round without unnecessarily complicating the code.
534  */
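/*
 * Rough sketch of the response buffer layout described above
 * (illustrative only, not to scale):
 *
 *	+--------------+--------------+-----+-----------+-----------+
 *	| SHARE_INFO_1 | SHARE_INFO_1 | ... | comment 1 | comment 2 | ...
 *	|   (fixed)    |   (fixed)    |     | (variable)| (variable)|
 *	+--------------+--------------+-----+-----------+-----------+
 *	|<--------- infolen --------->|<-------- cmntlen --------->|
 *
 * Each fixed entry carries cmnt_offs, the offset of its comment string
 * from the start of the buffer.
 */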
535 void
536 smb_kshare_enum(smb_server_t *sv, smb_enumshare_info_t *esi)
537 {
538 	smb_avl_t *share_avl;
539 	smb_avl_cursor_t cursor;
540 	smb_kshare_t *shr;
541 	int remained;
542 	uint16_t infolen = 0;
543 	uint16_t cmntlen = 0;
544 	uint16_t sharelen;
545 	uint16_t clen;
546 	uint32_t cmnt_offs;
547 	smb_msgbuf_t info_mb;
548 	smb_msgbuf_t cmnt_mb;
549 	boolean_t autohome_added = B_FALSE;
550 
551 	if (!smb_export_isready(sv)) {
552 		esi->es_ntotal = esi->es_nsent = 0;
553 		esi->es_datasize = 0;
554 		return;
555 	}
556 
557 	esi->es_ntotal = esi->es_nsent = 0;
558 	remained = esi->es_bufsize;
559 	share_avl = &sv->sv_export.e_share_avl;
560 
561 	/* Do the necessary calculations in the first round */
562 	smb_avl_iterinit(share_avl, &cursor);
563 
564 	while ((shr = smb_avl_iterate(share_avl, &cursor)) != NULL) {
565 		if (shr->shr_oemname == NULL) {
566 			smb_avl_release(share_avl, shr);
567 			continue;
568 		}
569 
570 		if ((shr->shr_flags & SMB_SHRF_AUTOHOME) && !autohome_added) {
571 			if (esi->es_posix_uid == shr->shr_uid) {
572 				autohome_added = B_TRUE;
573 			} else {
574 				smb_avl_release(share_avl, shr);
575 				continue;
576 			}
577 		}
578 
579 		esi->es_ntotal++;
580 
581 		if (remained <= 0) {
582 			smb_avl_release(share_avl, shr);
583 			continue;
584 		}
585 
586 		clen = strlen(shr->shr_cmnt) + 1;
587 		sharelen = SHARE_INFO_1_SIZE + clen;
588 
589 		if (sharelen <= remained) {
590 			infolen += SHARE_INFO_1_SIZE;
591 			cmntlen += clen;
592 		}
593 
594 		remained -= sharelen;
595 		smb_avl_release(share_avl, shr);
596 	}
597 
598 	esi->es_datasize = infolen + cmntlen;
599 
600 	smb_msgbuf_init(&info_mb, (uint8_t *)esi->es_buf, infolen, 0);
601 	smb_msgbuf_init(&cmnt_mb, (uint8_t *)esi->es_buf + infolen, cmntlen, 0);
602 	cmnt_offs = infolen;
603 
604 	/* Encode the data in the second round */
605 	smb_avl_iterinit(share_avl, &cursor);
606 	autohome_added = B_FALSE;
607 
608 	while ((shr = smb_avl_iterate(share_avl, &cursor)) != NULL) {
609 		if (shr->shr_oemname == NULL) {
610 			smb_avl_release(share_avl, shr);
611 			continue;
612 		}
613 
614 		if ((shr->shr_flags & SMB_SHRF_AUTOHOME) && !autohome_added) {
615 			if (esi->es_posix_uid == shr->shr_uid) {
616 				autohome_added = B_TRUE;
617 			} else {
618 				smb_avl_release(share_avl, shr);
619 				continue;
620 			}
621 		}
622 
623 		if (smb_msgbuf_encode(&info_mb, "13c.wl",
624 		    shr->shr_oemname, shr->shr_type, cmnt_offs) < 0) {
625 			smb_avl_release(share_avl, shr);
626 			break;
627 		}
628 
629 		if (smb_msgbuf_encode(&cmnt_mb, "s", shr->shr_cmnt) < 0) {
630 			smb_avl_release(share_avl, shr);
631 			break;
632 		}
633 
634 		cmnt_offs += strlen(shr->shr_cmnt) + 1;
635 		esi->es_nsent++;
636 
637 		smb_avl_release(share_avl, shr);
638 	}
639 
640 	smb_msgbuf_term(&info_mb);
641 	smb_msgbuf_term(&cmnt_mb);
642 }
643 
644 /*
645  * Looks up the given share and returns a pointer
646  * to its definition if it's found. A hold on the
647  * object is taken before the pointer is returned,
648  * in which case the caller MUST always call
649  * smb_kshare_release().
650  */
651 smb_kshare_t *
652 smb_kshare_lookup(smb_server_t *sv, const char *shrname)
653 {
654 	smb_kshare_t key;
655 	smb_kshare_t *shr;
656 
657 	ASSERT(shrname);
658 
659 	if (!smb_export_isready(sv))
660 		return (NULL);
661 
662 	key.shr_name = (char *)shrname;
663 	shr = smb_avl_lookup(&sv->sv_export.e_share_avl, &key);
664 	return (shr);
665 }
666 
667 /*
668  * Releases the hold taken on the specified share object
669  */
670 void
671 smb_kshare_release(smb_server_t *sv, smb_kshare_t *shr)
672 {
673 	ASSERT(shr);
674 	ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);
675 
676 	smb_avl_release(&sv->sv_export.e_share_avl, shr);
677 }
678 
679 /*
680  * Adds the given share to the specified server.
681  * If the share is a disk share, look up the share path
682  * and hold the smb_node_t for the share root.
683  *
684  * If the share is an Autohome share and it is
685  * already in the AVL, only the reference count for
686  * that share is incremented.
687  */
688 static int
689 smb_kshare_export(smb_server_t *sv, smb_kshare_t *shr)
690 {
691 	smb_avl_t	*share_avl;
692 	smb_kshare_t	*auto_shr;
693 	smb_node_t	*snode = NULL;
694 	int		rc = 0;
695 
696 	share_avl = &sv->sv_export.e_share_avl;
697 
698 	if (!STYPE_ISDSK(shr->shr_type)) {
699 		if ((rc = smb_avl_add(share_avl, shr)) != 0) {
700 			cmn_err(CE_WARN, "export[%s]: failed caching (%d)",
701 			    shr->shr_name, rc);
702 		}
703 
704 		return (rc);
705 	}
706 
707 	if ((auto_shr = smb_avl_lookup(share_avl, shr)) != NULL) {
708 		rc = EEXIST;
709 		if ((auto_shr->shr_flags & SMB_SHRF_AUTOHOME) != 0) {
710 			mutex_enter(&auto_shr->shr_mutex);
711 			auto_shr->shr_autocnt++;
712 			mutex_exit(&auto_shr->shr_mutex);
713 			rc = 0;
714 		}
715 		smb_avl_release(share_avl, auto_shr);
716 		return (rc);
717 	}
718 
719 	/*
720 	 * Get the root smb_node_t for this share, held.
721 	 * This hold is normally released during AVL destroy,
722 	 * via the element destructor:  smb_kshare_destroy
723 	 */
724 	rc = smb_server_share_lookup(sv, shr->shr_path, &snode);
725 	if (rc != 0) {
726 		cmn_err(CE_WARN, "export[%s(%s)]: lookup failed (%d)",
727 		    shr->shr_name, shr->shr_path, rc);
728 		return (rc);
729 	}
730 
731 	shr->shr_root_node = snode;
732 	if ((rc = smb_avl_add(share_avl, shr)) != 0) {
733 		cmn_err(CE_WARN, "export[%s]: failed caching (%d)",
734 		    shr->shr_name, rc);
735 		shr->shr_root_node = NULL;
736 		smb_node_release(snode);
737 		return (rc);
738 	}
739 
740 	/*
741 	 * For CA shares, find or create the CA handle dir,
742 	 * and (if restarted) import persistent handles.
743 	 */
744 	if ((shr->shr_flags & SMB_SHRF_CA) != 0) {
745 		rc = smb2_dh_new_ca_share(sv, shr);
746 		if (rc != 0) {
747 			/* Just make it a non-CA share. */
748 			mutex_enter(&shr->shr_mutex);
749 			shr->shr_flags &= ~SMB_SHRF_CA;
750 			mutex_exit(&shr->shr_mutex);
751 			rc = 0;
752 		}
753 	}
754 
755 	return (rc);
756 }
757 
758 /*
759  * Removes the share specified by 'shrname' from the AVL
760  * tree of the given server if it's there.
761  *
762  * If the share is an Autohome share, the autohome count
763  * is decremented and the share is only removed if the
764  * count goes to zero.
765  *
766  * If the share is a disk share, the hold on the corresponding
767  * file system is released before removing the share from
768  * the AVL tree.
769  */
770 static int
771 smb_kshare_unexport(smb_server_t *sv, const char *shrname)
772 {
773 	smb_avl_t	*share_avl;
774 	smb_kshare_t	key;
775 	smb_kshare_t	*shr;
776 	boolean_t	auto_unexport;
777 
778 	share_avl = &sv->sv_export.e_share_avl;
779 
780 	key.shr_name = (char *)shrname;
781 	if ((shr = smb_avl_lookup(share_avl, &key)) == NULL)
782 		return (ENOENT);
783 
784 	if ((shr->shr_flags & SMB_SHRF_AUTOHOME) != 0) {
785 		mutex_enter(&shr->shr_mutex);
786 		shr->shr_autocnt--;
787 		auto_unexport = (shr->shr_autocnt == 0);
788 		mutex_exit(&shr->shr_mutex);
789 		if (!auto_unexport) {
790 			smb_avl_release(share_avl, shr);
791 			return (0);
792 		}
793 	}
794 
795 	smb_avl_remove(share_avl, shr);
796 
797 	mutex_enter(&shr->shr_mutex);
798 	shr->shr_flags |= SMB_SHRF_REMOVED;
799 	mutex_exit(&shr->shr_mutex);
800 
801 	smb_avl_release(share_avl, shr);
802 
803 	return (0);
804 }
805 
806 /*
807  * Exports IPC$ or Admin shares
808  */
809 static int
810 smb_kshare_export_trans(smb_server_t *sv, char *name, char *path, char *cmnt)
811 {
812 	smb_kshare_t *shr;
813 
814 	ASSERT(name);
815 	ASSERT(path);
816 
817 	shr = kmem_cache_alloc(smb_kshare_cache_share, KM_SLEEP);
818 	bzero(shr, sizeof (smb_kshare_t));
819 
820 	shr->shr_magic = SMB_SHARE_MAGIC;
821 	shr->shr_refcnt = 1;
822 	shr->shr_flags = SMB_SHRF_TRANS | smb_kshare_is_admin(name);
823 	if (strcasecmp(name, "IPC$") == 0)
824 		shr->shr_type = STYPE_IPC;
825 	else
826 		shr->shr_type = STYPE_DISKTREE;
827 
828 	shr->shr_type |= smb_kshare_is_special(name);
829 
830 	shr->shr_name = smb_mem_strdup(name);
831 	if (path)
832 		shr->shr_path = smb_mem_strdup(path);
833 	if (cmnt)
834 		shr->shr_cmnt = smb_mem_strdup(cmnt);
835 	shr->shr_oemname = smb_kshare_oemname(name);
836 
837 	return (smb_kshare_export(sv, shr));
838 }
839 
840 /*
841  * Decodes share information in nvlist format into an smb_kshare_t
842  * structure.
843  *
844  * This is a temporary function and will be replaced by functions
845  * provided by libsharev2 code once it is available.
846  */
847 static smb_kshare_t *
848 smb_kshare_decode(nvlist_t *share)
849 {
850 	smb_kshare_t tmp;
851 	smb_kshare_t *shr;
852 	nvlist_t *smb;
853 	char *csc_name = NULL, *strbuf = NULL;
854 	int rc;
855 
856 	ASSERT(share);
857 
858 	bzero(&tmp, sizeof (smb_kshare_t));
859 
860 	rc = nvlist_lookup_string(share, "name", &tmp.shr_name);
861 	rc |= nvlist_lookup_string(share, "path", &tmp.shr_path);
862 	(void) nvlist_lookup_string(share, "desc", &tmp.shr_cmnt);
863 
864 	ASSERT(tmp.shr_name && tmp.shr_path);
865 
866 	rc |= nvlist_lookup_nvlist(share, "smb", &smb);
867 	if (rc != 0) {
868 		cmn_err(CE_WARN, "kshare: failed looking up SMB properties"
869 		    " (%d)", rc);
870 		return (NULL);
871 	}
872 
873 	rc = nvlist_lookup_uint32(smb, "type", &tmp.shr_type);
874 	if (rc != 0) {
875 		cmn_err(CE_WARN, "kshare[%s]: failed getting the share type"
876 		    " (%d)", tmp.shr_name, rc);
877 		return (NULL);
878 	}
879 
880 	(void) nvlist_lookup_string(smb, SHOPT_AD_CONTAINER,
881 	    &tmp.shr_container);
882 	(void) nvlist_lookup_string(smb, SHOPT_NONE, &tmp.shr_access_none);
883 	(void) nvlist_lookup_string(smb, SHOPT_RO, &tmp.shr_access_ro);
884 	(void) nvlist_lookup_string(smb, SHOPT_RW, &tmp.shr_access_rw);
885 
886 	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_ABE, SMB_SHRF_ABE);
887 	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_CATIA,
888 	    SMB_SHRF_CATIA);
889 	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_GUEST,
890 	    SMB_SHRF_GUEST_OK);
891 	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_DFSROOT,
892 	    SMB_SHRF_DFSROOT);
893 	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_QUOTAS,
894 	    SMB_SHRF_QUOTAS);
895 	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_CA, SMB_SHRF_CA);
896 	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_FSO, SMB_SHRF_FSO);
897 	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_AUTOHOME,
898 	    SMB_SHRF_AUTOHOME);
899 
900 	if ((tmp.shr_flags & SMB_SHRF_AUTOHOME) == SMB_SHRF_AUTOHOME) {
901 		rc = nvlist_lookup_uint32(smb, "uid", &tmp.shr_uid);
902 		rc |= nvlist_lookup_uint32(smb, "gid", &tmp.shr_gid);
903 		if (rc != 0) {
904 			cmn_err(CE_WARN, "kshare: failed looking up uid/gid"
905 			    " (%d)", rc);
906 			return (NULL);
907 		}
908 	}
909 
910 	(void) nvlist_lookup_string(smb, SHOPT_ENCRYPT, &strbuf);
911 	smb_cfg_set_require(strbuf, &tmp.shr_encrypt);
912 
913 	(void) nvlist_lookup_string(smb, SHOPT_CSC, &csc_name);
914 	smb_kshare_csc_flags(&tmp, csc_name);
915 
916 	shr = kmem_cache_alloc(smb_kshare_cache_share, KM_SLEEP);
917 	bzero(shr, sizeof (smb_kshare_t));
918 
919 	shr->shr_magic = SMB_SHARE_MAGIC;
920 	shr->shr_refcnt = 1;
921 
922 	shr->shr_name = smb_mem_strdup(tmp.shr_name);
923 	shr->shr_path = smb_mem_strdup(tmp.shr_path);
924 	if (tmp.shr_cmnt)
925 		shr->shr_cmnt = smb_mem_strdup(tmp.shr_cmnt);
926 	if (tmp.shr_container)
927 		shr->shr_container = smb_mem_strdup(tmp.shr_container);
928 	if (tmp.shr_access_none)
929 		shr->shr_access_none = smb_mem_strdup(tmp.shr_access_none);
930 	if (tmp.shr_access_ro)
931 		shr->shr_access_ro = smb_mem_strdup(tmp.shr_access_ro);
932 	if (tmp.shr_access_rw)
933 		shr->shr_access_rw = smb_mem_strdup(tmp.shr_access_rw);
934 
935 	shr->shr_oemname = smb_kshare_oemname(shr->shr_name);
936 	shr->shr_flags = tmp.shr_flags | smb_kshare_is_admin(shr->shr_name);
937 	shr->shr_type = tmp.shr_type | smb_kshare_is_special(shr->shr_name);
938 	shr->shr_encrypt = tmp.shr_encrypt;
939 
940 	shr->shr_uid = tmp.shr_uid;
941 	shr->shr_gid = tmp.shr_gid;
942 
943 	if ((shr->shr_flags & SMB_SHRF_AUTOHOME) == SMB_SHRF_AUTOHOME)
944 		shr->shr_autocnt = 1;
945 
946 	return (shr);
947 }
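/*
 * For illustration, the nvlist consumed by smb_kshare_decode() is assumed
 * to look roughly like this (property names from the lookups above, values
 * purely hypothetical):
 *
 *	name = "pub"
 *	path = "/export/pub"
 *	desc = "public share"
 *	smb  = {
 *		type = 0
 *		SHOPT_ABE = "true"
 *		SHOPT_CSC = "manual"
 *		...
 *	}
 */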
948 
949 #if 0
950 static void
951 smb_kshare_log(smb_kshare_t *shr)
952 {
953 	cmn_err(CE_NOTE, "Share info:");
954 	cmn_err(CE_NOTE, "\tname: %s", (shr->shr_name) ? shr->shr_name : "");
955 	cmn_err(CE_NOTE, "\tpath: %s", (shr->shr_path) ? shr->shr_path : "");
956 	cmn_err(CE_NOTE, "\tcmnt: (%s)",
957 	    (shr->shr_cmnt) ? shr->shr_cmnt : "NULL");
958 	cmn_err(CE_NOTE, "\toemname: (%s)",
959 	    (shr->shr_oemname) ? shr->shr_oemname : "NULL");
960 	cmn_err(CE_NOTE, "\tflags: %X", shr->shr_flags);
961 	cmn_err(CE_NOTE, "\ttype: %d", shr->shr_type);
962 }
963 #endif
964 
965 /*
966  * Compare function used by shares AVL
967  */
968 static int
969 smb_kshare_cmp(const void *p1, const void *p2)
970 {
971 	smb_kshare_t *shr1 = (smb_kshare_t *)p1;
972 	smb_kshare_t *shr2 = (smb_kshare_t *)p2;
973 	int rc;
974 
975 	ASSERT(shr1);
976 	ASSERT(shr1->shr_name);
977 
978 	ASSERT(shr2);
979 	ASSERT(shr2->shr_name);
980 
981 	rc = smb_strcasecmp(shr1->shr_name, shr2->shr_name, 0);
982 
983 	if (rc < 0)
984 		return (-1);
985 
986 	if (rc > 0)
987 		return (1);
988 
989 	return (0);
990 }
991 
992 /*
993  * This function is called by smb_avl routines whenever
994  * there is a need to take a hold on a share structure
995  * inside AVL
996  */
997 static void
998 smb_kshare_hold(const void *p)
999 {
1000 	smb_kshare_t *shr = (smb_kshare_t *)p;
1001 
1002 	ASSERT(shr);
1003 	ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);
1004 
1005 	mutex_enter(&shr->shr_mutex);
1006 	shr->shr_refcnt++;
1007 	mutex_exit(&shr->shr_mutex);
1008 }
1009 
1010 /*
1011  * This function must be called by smb_avl routines whenever
1012  * smb_kshare_hold is called and the hold needs to be released.
1013  */
1014 static boolean_t
1015 smb_kshare_rele(const void *p)
1016 {
1017 	smb_kshare_t *shr = (smb_kshare_t *)p;
1018 	boolean_t destroy;
1019 
1020 	ASSERT(shr);
1021 	ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);
1022 
1023 	mutex_enter(&shr->shr_mutex);
1024 	ASSERT(shr->shr_refcnt > 0);
1025 	shr->shr_refcnt--;
1026 	destroy = (shr->shr_refcnt == 0);
1027 	mutex_exit(&shr->shr_mutex);
1028 
1029 	return (destroy);
1030 }
1031 
1032 /*
1033  * Frees all the memory allocated for the given
1034  * share structure. It also removes the structure
1035  * from the share cache.
1036  */
1037 static void
1038 smb_kshare_destroy(void *p)
1039 {
1040 	smb_kshare_t *shr = (smb_kshare_t *)p;
1041 
1042 	ASSERT(shr);
1043 	ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);
1044 
1045 	if (shr->shr_ca_dir != NULL)
1046 		smb_node_release(shr->shr_ca_dir);
1047 	if (shr->shr_root_node)
1048 		smb_node_release(shr->shr_root_node);
1049 
1050 	smb_mem_free(shr->shr_name);
1051 	smb_mem_free(shr->shr_path);
1052 	smb_mem_free(shr->shr_cmnt);
1053 	smb_mem_free(shr->shr_container);
1054 	smb_mem_free(shr->shr_oemname);
1055 	smb_mem_free(shr->shr_access_none);
1056 	smb_mem_free(shr->shr_access_ro);
1057 	smb_mem_free(shr->shr_access_rw);
1058 
1059 	kmem_cache_free(smb_kshare_cache_share, shr);
1060 }
1061 
1062 
1063 /*
1064  * Generate an OEM name for the given share name.  If the name is
1065  * shorter than 13 bytes, the OEM name is returned; otherwise NULL
1066  * is returned.
1067  */
1068 static char *
1069 smb_kshare_oemname(const char *shrname)
1070 {
1071 	smb_wchar_t *unibuf;
1072 	char *oem_name;
1073 	int length;
1074 
1075 	length = strlen(shrname) + 1;
1076 
1077 	oem_name = smb_mem_alloc(length);
1078 	unibuf = smb_mem_alloc(length * sizeof (smb_wchar_t));
1079 
1080 	(void) smb_mbstowcs(unibuf, shrname, length);
1081 
1082 	if (ucstooem(oem_name, unibuf, length, OEM_CPG_850) == 0)
1083 		(void) strcpy(oem_name, shrname);
1084 
1085 	smb_mem_free(unibuf);
1086 
1087 	if (strlen(oem_name) + 1 > SMB_SHARE_OEMNAME_MAX) {
1088 		smb_mem_free(oem_name);
1089 		return (NULL);
1090 	}
1091 
1092 	return (oem_name);
1093 }
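/*
 * For example (hypothetical names): "public" yields the OEM name "public",
 * while a name whose OEM form plus terminating NUL exceeds
 * SMB_SHARE_OEMNAME_MAX bytes yields NULL.
 */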
1094 
1095 /*
1096  * Special share reserved for interprocess communication (IPC$) or
1097  * remote administration of the server (ADMIN$). Can also refer to
1098  * administrative shares such as C$, D$, E$, and so forth.
1099  */
1100 static int
1101 smb_kshare_is_special(const char *sharename)
1102 {
1103 	int len;
1104 
1105 	if (sharename == NULL)
1106 		return (0);
1107 
1108 	if ((len = strlen(sharename)) == 0)
1109 		return (0);
1110 
1111 	if (sharename[len - 1] == '$')
1112 		return (STYPE_SPECIAL);
1113 
1114 	return (0);
1115 }
1116 
1117 /*
1118  * Check whether or not this is a default admin share: C$, D$ etc.
1119  */
1120 static boolean_t
1121 smb_kshare_is_admin(const char *sharename)
1122 {
1123 	if (sharename == NULL)
1124 		return (B_FALSE);
1125 
1126 	if (strlen(sharename) == 2 &&
1127 	    smb_isalpha(sharename[0]) && sharename[1] == '$') {
1128 		return (B_TRUE);
1129 	}
1130 
1131 	return (B_FALSE);
1132 }
1133 
1134 /*
1135  * Decodes the given boolean share option.
1136  * If the option is present in the nvlist and its value is true,
1137  * the corresponding flag value is returned; otherwise 0 is returned.
1138  */
1139 static uint32_t
1140 smb_kshare_decode_bool(nvlist_t *nvl, const char *propname, uint32_t flag)
1141 {
1142 	char *boolp;
1143 
1144 	if (nvlist_lookup_string(nvl, propname, &boolp) == 0)
1145 		if (strcasecmp(boolp, "true") == 0)
1146 			return (flag);
1147 
1148 	return (0);
1149 }
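/*
 * Example (hypothetical property value): given an nvlist in which
 * SHOPT_ABE is set to "true", the call
 *
 *	flags |= smb_kshare_decode_bool(smb, SHOPT_ABE, SMB_SHRF_ABE);
 *
 * ORs SMB_SHRF_ABE into flags; any other value, or a missing property,
 * contributes 0.
 */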
1150 
1151 /*
1152  * Map a client-side caching (CSC) option to the appropriate share
1153  * flag.  Only one option is allowed; an error will be logged if
1154  * multiple options have been specified.  We don't need to do anything
1155  * about multiple values here because the SRVSVC will not recognize
1156  * a value containing multiple flags and will return the default value.
1157  *
1158  * If the option value is not recognized, it will be ignored: invalid
1159  * values will typically be caught and rejected by sharemgr.
1160  */
1161 static void
1162 smb_kshare_csc_flags(smb_kshare_t *shr, const char *value)
1163 {
1164 	int i;
1165 	static struct {
1166 		char *value;
1167 		uint32_t flag;
1168 	} cscopt[] = {
1169 		{ "disabled",	SMB_SHRF_CSC_DISABLED },
1170 		{ "manual",	SMB_SHRF_CSC_MANUAL },
1171 		{ "auto",	SMB_SHRF_CSC_AUTO },
1172 		{ "vdo",	SMB_SHRF_CSC_VDO }
1173 	};
1174 
1175 	if (value == NULL)
1176 		return;
1177 
1178 	for (i = 0; i < (sizeof (cscopt) / sizeof (cscopt[0])); ++i) {
1179 		if (strcasecmp(value, cscopt[i].value) == 0) {
1180 			shr->shr_flags |= cscopt[i].flag;
1181 			break;
1182 		}
1183 	}
1184 
1185 	switch (shr->shr_flags & SMB_SHRF_CSC_MASK) {
1186 	case 0:
1187 	case SMB_SHRF_CSC_DISABLED:
1188 	case SMB_SHRF_CSC_MANUAL:
1189 	case SMB_SHRF_CSC_AUTO:
1190 	case SMB_SHRF_CSC_VDO:
1191 		break;
1192 
1193 	default:
1194 		cmn_err(CE_NOTE, "csc option conflict: 0x%08x",
1195 		    shr->shr_flags & SMB_SHRF_CSC_MASK);
1196 		break;
1197 	}
1198 }
1199 
1200 /*
1201  * This function processes the unexport event list and disconnects shares
1202  * asynchronously.  The function executes as a zone-specific thread.
1203  *
1204  * The server arg passed in is safe to use without a reference count, because
1205  * the server cannot be deleted until smb_thread_stop()/destroy() return,
1206  * which is also when the thread exits.
1207  */
1208 /*ARGSUSED*/
1209 static void
1210 smb_kshare_unexport_thread(smb_thread_t *thread, void *arg)
1211 {
1212 	smb_server_t	*sv = arg;
1213 	smb_unshare_t	*ux;
1214 
1215 	while (smb_thread_continue(thread)) {
1216 		while ((ux = list_head(&sv->sv_export.e_unexport_list.sl_list))
1217 		    != NULL) {
1218 			smb_slist_remove(&sv->sv_export.e_unexport_list, ux);
1219 			(void) smb_server_unshare(ux->us_sharename);
1220 			kmem_cache_free(smb_kshare_cache_unexport, ux);
1221 		}
1222 	}
1223 }
1224 
1225 static boolean_t
1226 smb_export_isready(smb_server_t *sv)
1227 {
1228 	boolean_t ready;
1229 
1230 	mutex_enter(&sv->sv_export.e_mutex);
1231 	ready = sv->sv_export.e_ready;
1232 	mutex_exit(&sv->sv_export.e_mutex);
1233 
1234 	return (ready);
1235 }
1236 
1237 #ifdef	_KERNEL
1238 /*
1239  * Return 0 upon success. Otherwise > 0
1240  */
1241 static int
1242 smb_kshare_chk_dsrv_status(int opcode, smb_dr_ctx_t *dec_ctx)
1243 {
1244 	int status = smb_dr_get_int32(dec_ctx);
1245 	int err;
1246 
1247 	switch (status) {
1248 	case SMB_SHARE_DSUCCESS:
1249 		return (0);
1250 
1251 	case SMB_SHARE_DERROR:
1252 		err = smb_dr_get_uint32(dec_ctx);
1253 		cmn_err(CE_WARN, "%d: Encountered door server error %d",
1254 		    opcode, err);
1255 		(void) smb_dr_decode_finish(dec_ctx);
1256 		return (err);
1257 	}
1258 
1259 	ASSERT(0);
1260 	return (EINVAL);
1261 }
1262 #endif	/* _KERNEL */
1263