/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <smbsrv/smb_door.h>
#include <smbsrv/smb_kproto.h>
#include <smbsrv/smb_ktypes.h>

typedef struct smb_unshare {
	list_node_t	us_lnd;
	char		us_sharename[MAXNAMELEN];
} smb_unshare_t;

static smb_export_t smb_export;

static int smb_kshare_cmp(const void *, const void *);
static void smb_kshare_hold(const void *);
static boolean_t smb_kshare_rele(const void *);
static void smb_kshare_destroy(void *);
static char *smb_kshare_oemname(const char *);
static int smb_kshare_is_special(const char *);
static boolean_t smb_kshare_is_admin(const char *);
static smb_kshare_t *smb_kshare_decode(nvlist_t *);
static uint32_t smb_kshare_decode_bool(nvlist_t *, const char *, uint32_t);
static void smb_kshare_unexport_thread(smb_thread_t *, void *);
static int smb_kshare_export(smb_kshare_t *);
static int smb_kshare_unexport(const char *);
static int smb_kshare_export_trans(char *, char *, char *);
static void smb_kshare_csc_flags(smb_kshare_t *, const char *);

static boolean_t smb_export_isready(void);

static int smb_kshare_chk_dsrv_status(int, smb_dr_ctx_t *);

static smb_avl_nops_t smb_kshare_avlops = {
	smb_kshare_cmp,
	smb_kshare_hold,
	smb_kshare_rele,
	smb_kshare_destroy
};

/*
 * This function is not multithread-safe. The caller must ensure that
 * only one thread calls it at a time.
 */
door_handle_t
smb_kshare_door_init(int door_id)
{
	return (door_ki_lookup(door_id));
}

/*
 * This function is not multithread-safe. The caller must ensure that
 * only one thread calls it at a time.
 */
void
smb_kshare_door_fini(door_handle_t dhdl)
{
	if (dhdl)
		door_ki_rele(dhdl);
}

/*
 * This is a special interface used by ZFS to have a share added or
 * removed.
 *
 * arg is either an smb_share_t or a share name from userspace and must
 * be copied into the kernel: an smb_share_t for add operations, a share
 * name for delete operations.
 */
int
smb_kshare_upcall(door_handle_t dhdl, void *arg, boolean_t add_share)
{
	door_arg_t	doorarg = { 0 };
	char		*buf = NULL;
	char		*str = NULL;
	int		error;
	int		rc;
	unsigned int	used;
	smb_dr_ctx_t	*dec_ctx;
	smb_dr_ctx_t	*enc_ctx;
	smb_share_t	*lmshare = NULL;
	int		opcode;

	opcode = (add_share) ? SMB_SHROP_ADD : SMB_SHROP_DELETE;

	buf = kmem_alloc(SMB_SHARE_DSIZE, KM_SLEEP);
	enc_ctx = smb_dr_encode_start(buf, SMB_SHARE_DSIZE);
	smb_dr_put_uint32(enc_ctx, opcode);

	switch (opcode) {
	case SMB_SHROP_ADD:
		lmshare = kmem_alloc(sizeof (smb_share_t), KM_SLEEP);
		if (error = xcopyin(arg, lmshare, sizeof (smb_share_t))) {
			kmem_free(lmshare, sizeof (smb_share_t));
			kmem_free(buf, SMB_SHARE_DSIZE);
			return (error);
		}
		smb_dr_put_share(enc_ctx, lmshare);
		break;

	case SMB_SHROP_DELETE:
		str = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		if (error = copyinstr(arg, str, MAXPATHLEN, NULL)) {
			kmem_free(str, MAXPATHLEN);
			kmem_free(buf, SMB_SHARE_DSIZE);
			return (error);
		}
		smb_dr_put_string(enc_ctx, str);
		kmem_free(str, MAXPATHLEN);
		break;
	}

	if ((error = smb_dr_encode_finish(enc_ctx, &used)) != 0) {
		kmem_free(buf, SMB_SHARE_DSIZE);
		if (lmshare)
			kmem_free(lmshare, sizeof (smb_share_t));
		return (NERR_InternalError);
	}

	doorarg.data_ptr = buf;
	doorarg.data_size = used;
	doorarg.rbuf = buf;
	doorarg.rsize = SMB_SHARE_DSIZE;

	error = door_ki_upcall_limited(dhdl, &doorarg, NULL, SIZE_MAX, 0);

	if (error) {
		kmem_free(buf, SMB_SHARE_DSIZE);
		if (lmshare)
			kmem_free(lmshare, sizeof (smb_share_t));
		return (error);
	}

	dec_ctx = smb_dr_decode_start(doorarg.data_ptr, doorarg.data_size);
	if (smb_kshare_chk_dsrv_status(opcode, dec_ctx) != 0) {
		kmem_free(buf, SMB_SHARE_DSIZE);
		if (lmshare)
			kmem_free(lmshare, sizeof (smb_share_t));
		return (NERR_InternalError);
	}

	rc = smb_dr_get_uint32(dec_ctx);
	if (opcode == SMB_SHROP_ADD)
		smb_dr_get_share(dec_ctx, lmshare);

	if (smb_dr_decode_finish(dec_ctx))
		rc = NERR_InternalError;

	kmem_free(buf, SMB_SHARE_DSIZE);
	if (lmshare)
		kmem_free(lmshare, sizeof (smb_share_t));

	return ((rc == NERR_DuplicateShare && add_share) ? 0 : rc);
}
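
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * in-kernel caller could drive the door upcall interface above to publish
 * or withdraw a share via smbd.  The helper name, door id handling and
 * error handling are assumptions made purely for illustration.
 */
#if 0
static int
smb_kshare_upcall_example(int door_id, void *uarg, boolean_t add)
{
	door_handle_t	dhdl;
	int		rc;

	/* Single-threaded by assumption; see smb_kshare_door_init(). */
	if ((dhdl = smb_kshare_door_init(door_id)) == NULL)
		return (NERR_InternalError);

	/*
	 * For an add, 'uarg' is a userland smb_share_t; for a delete it
	 * is a userland share-name string.  smb_kshare_upcall() performs
	 * the copyin and the door round trip to smbd.
	 */
	rc = smb_kshare_upcall(dhdl, uarg, add);

	smb_kshare_door_fini(dhdl);
	return (rc);
}
#endif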

/*
 * Executes the map and unmap commands for shares.
 */
int
smb_kshare_exec(smb_shr_execinfo_t *execinfo)
{
	int exec_rc = 0;

	(void) smb_kdoor_upcall(SMB_DR_SHR_EXEC,
	    execinfo, smb_shr_execinfo_xdr, &exec_rc, xdr_int);

	return (exec_rc);
}

/*
 * Obtains any host-based access restriction on the specified
 * share for the given host (ipaddr) by calling smbd.
 */
uint32_t
smb_kshare_hostaccess(smb_kshare_t *shr, smb_inaddr_t *ipaddr)
{
	smb_shr_hostaccess_query_t req;
	uint32_t host_access = SMB_SHRF_ACC_OPEN;
	uint32_t flag = SMB_SHRF_ACC_OPEN;
	uint32_t access;

	if (smb_inet_iszero(ipaddr))
		return (ACE_ALL_PERMS);

	if ((shr->shr_access_none == NULL || *shr->shr_access_none == '\0') &&
	    (shr->shr_access_ro == NULL || *shr->shr_access_ro == '\0') &&
	    (shr->shr_access_rw == NULL || *shr->shr_access_rw == '\0'))
		return (ACE_ALL_PERMS);

	if (shr->shr_access_none != NULL)
		flag |= SMB_SHRF_ACC_NONE;
	if (shr->shr_access_ro != NULL)
		flag |= SMB_SHRF_ACC_RO;
	if (shr->shr_access_rw != NULL)
		flag |= SMB_SHRF_ACC_RW;

	req.shq_none = shr->shr_access_none;
	req.shq_ro = shr->shr_access_ro;
	req.shq_rw = shr->shr_access_rw;
	req.shq_flag = flag;
	req.shq_ipaddr = *ipaddr;

	(void) smb_kdoor_upcall(SMB_DR_SHR_HOSTACCESS,
	    &req, smb_shr_hostaccess_query_xdr, &host_access, xdr_uint32_t);

	switch (host_access) {
	case SMB_SHRF_ACC_RO:
		access = ACE_ALL_PERMS & ~ACE_ALL_WRITE_PERMS;
		break;
	case SMB_SHRF_ACC_OPEN:
	case SMB_SHRF_ACC_RW:
		access = ACE_ALL_PERMS;
		break;
	case SMB_SHRF_ACC_NONE:
	default:
		access = 0;
	}

	return (access);
}
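
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * caller could combine the host-based ACE mask returned by
 * smb_kshare_hostaccess() with the access rights it intends to grant.
 * The function name and the 'desired' parameter are assumptions made
 * purely for illustration.
 */
#if 0
static uint32_t
smb_kshare_effective_access(smb_kshare_t *shr, smb_inaddr_t *ipaddr,
    uint32_t desired)
{
	uint32_t host_mask = smb_kshare_hostaccess(shr, ipaddr);

	/* Grant only what the host-based none/ro/rw lists allow. */
	return (desired & host_mask);
}
#endif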

/*
 * This function is called when the smb_server_t is created, which means
 * smb/service is ready for exporting SMB shares.
 */
void
smb_export_start(void)
{
	mutex_enter(&smb_export.e_mutex);
	if (smb_export.e_ready) {
		mutex_exit(&smb_export.e_mutex);
		return;
	}

	smb_export.e_ready = B_TRUE;
	mutex_exit(&smb_export.e_mutex);

	smb_avl_create(&smb_export.e_share_avl, sizeof (smb_kshare_t),
	    offsetof(smb_kshare_t, shr_link), &smb_kshare_avlops);

	(void) smb_kshare_export_trans("IPC$",	"IPC$", "Remote IPC");
	(void) smb_kshare_export_trans("c$",	SMB_CVOL, "Default Share");
	(void) smb_kshare_export_trans("vss$",	SMB_VSS, "VSS");
}

/*
 * This function is called when the smb_server_t goes away, which means
 * SMB shares should no longer be made available to clients.
 */
void
smb_export_stop(void)
{
	mutex_enter(&smb_export.e_mutex);
	if (!smb_export.e_ready) {
		mutex_exit(&smb_export.e_mutex);
		return;
	}
	smb_export.e_ready = B_FALSE;
	mutex_exit(&smb_export.e_mutex);

	smb_avl_destroy(&smb_export.e_share_avl);
	smb_vfs_rele_all(&smb_export);
}

int
smb_kshare_init(void)
{
	int rc;

	smb_export.e_cache_share = kmem_cache_create("smb_share_cache",
	    sizeof (smb_kshare_t), 8, NULL, NULL, NULL, NULL, NULL, 0);

	smb_export.e_cache_unexport = kmem_cache_create("smb_unexport_cache",
	    sizeof (smb_unshare_t), 8, NULL, NULL, NULL, NULL, NULL, 0);

	smb_export.e_cache_vfs = kmem_cache_create("smb_vfs_cache",
	    sizeof (smb_vfs_t), 8, NULL, NULL, NULL, NULL, NULL, 0);

	smb_llist_constructor(&smb_export.e_vfs_list, sizeof (smb_vfs_t),
	    offsetof(smb_vfs_t, sv_lnd));

	smb_slist_constructor(&smb_export.e_unexport_list,
	    sizeof (smb_unshare_t), offsetof(smb_unshare_t, us_lnd));

	smb_thread_init(&smb_export.e_unexport_thread, "smb_thread_unexport",
	    smb_kshare_unexport_thread, NULL, NULL, NULL);

	if ((rc = smb_thread_start(&smb_export.e_unexport_thread)) != 0)
		return (rc);

	return (0);
}

void
smb_kshare_fini(void)
{
	smb_unshare_t *ux;

	smb_thread_stop(&smb_export.e_unexport_thread);
	smb_thread_destroy(&smb_export.e_unexport_thread);

	while ((ux = list_head(&smb_export.e_unexport_list.sl_list)) != NULL) {
		smb_slist_remove(&smb_export.e_unexport_list, ux);
		kmem_cache_free(smb_export.e_cache_unexport, ux);
	}
	smb_slist_destructor(&smb_export.e_unexport_list);

	smb_vfs_rele_all(&smb_export);

	smb_llist_destructor(&smb_export.e_vfs_list);

	kmem_cache_destroy(smb_export.e_cache_unexport);
	kmem_cache_destroy(smb_export.e_cache_share);
	kmem_cache_destroy(smb_export.e_cache_vfs);
}

/*
 * A list of shares in nvlist format can be sent down
 * from userspace through the IOCTL interface. The nvlist
 * is unpacked here and all the shares in the list are
 * exported.  A sketch of how such an nvlist might be built
 * in userspace follows this function.
 */
348 smb_kshare_export_list(smb_ioc_share_t *ioc)
349 {
350 	nvlist_t	*shrlist;
351 	nvlist_t	 *share;
352 	nvpair_t	 *nvp;
353 	smb_kshare_t	 *shr;
354 	char		*shrname;
355 	int		rc;
356 
357 	if (!smb_export_isready())
358 		return (ENOTACTIVE);
359 
360 	if ((rc = nvlist_unpack(ioc->shr, ioc->shrlen, &shrlist, KM_SLEEP))
361 	    != 0)
362 		return (rc);
363 
364 	for (nvp = nvlist_next_nvpair(shrlist, NULL); nvp != NULL;
365 	    nvp = nvlist_next_nvpair(shrlist, nvp)) {
366 		if (nvpair_type(nvp) != DATA_TYPE_NVLIST)
367 			continue;
368 
369 		shrname = nvpair_name(nvp);
370 		ASSERT(shrname);
371 
372 		if ((rc = nvpair_value_nvlist(nvp, &share)) != 0) {
373 			cmn_err(CE_WARN, "export[%s]: failed accessing",
374 			    shrname);
375 			continue;
376 		}
377 
378 		if ((shr = smb_kshare_decode(share)) == NULL) {
379 			cmn_err(CE_WARN, "export[%s]: failed decoding",
380 			    shrname);
381 			continue;
382 		}
383 
384 		if ((rc = smb_kshare_export(shr)) != 0) {
385 			smb_kshare_destroy(shr);
386 			continue;
387 		}
388 	}
389 
390 	nvlist_free(shrlist);
391 
392 	return (0);
393 }
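
/*
 * Illustrative sketch (not part of the original file): roughly how a
 * userspace producer could build the packed nvlist that
 * smb_kshare_export_list() unpacks above.  This is ordinary libnvpair
 * code; the helper name is hypothetical and the exact set of properties
 * sent by smbd/libshare may differ.  Share options (SHOPT_*) would be
 * added as string properties of the nested "smb" nvlist.
 */
#if 0
static int
example_pack_share(const char *name, const char *path, const char *desc,
    char **bufp, size_t *buflenp)
{
	nvlist_t *shrlist = NULL;
	nvlist_t *share = NULL;
	nvlist_t *smb = NULL;
	int rc;

	/* Top level: one nvpair per share, keyed by share name. */
	rc = nvlist_alloc(&shrlist, NV_UNIQUE_NAME, 0);
	rc |= nvlist_alloc(&share, NV_UNIQUE_NAME, 0);
	rc |= nvlist_alloc(&smb, NV_UNIQUE_NAME, 0);

	if (rc == 0) {
		rc |= nvlist_add_string(share, "name", name);
		rc |= nvlist_add_string(share, "path", path);
		rc |= nvlist_add_string(share, "desc", desc);
		rc |= nvlist_add_nvlist(share, "smb", smb);
		rc |= nvlist_add_nvlist(shrlist, name, share);
	}

	/* Passing *bufp == NULL lets libnvpair allocate the packed buffer. */
	if (rc == 0)
		rc = nvlist_pack(shrlist, bufp, buflenp, NV_ENCODE_XDR, 0);

	nvlist_free(smb);
	nvlist_free(share);
	nvlist_free(shrlist);
	return (rc);
}
#endif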

/*
 * This function is invoked when a share is disabled to disconnect trees
 * and close files.  Cleaning up may involve VOP and/or VFS calls, which
 * may conflict/deadlock with stuck threads if something is amiss with the
 * file system.  Queueing the request for asynchronous processing allows the
 * call to return immediately so that, if the unshare is being done in the
 * context of a forced unmount, the forced unmount will always be able to
 * proceed (unblocking stuck I/O and eventually allowing all blocked unshare
 * processes to complete).
 *
 * The path lookup to find the root vnode of the VFS in question and the
 * release of this vnode are done synchronously prior to any associated
 * unmount.  Doing these asynchronously with respect to an associated unmount
 * could run the risk of a spurious EBUSY for a standard unmount or an EIO
 * during the path lookup due to a forced unmount finishing first.
 */
int
smb_kshare_unexport_list(smb_ioc_share_t *ioc)
{
	smb_unshare_t	*ux;
	nvlist_t	*shrlist;
	nvpair_t	*nvp;
	boolean_t	unexport = B_FALSE;
	char		*shrname;
	int		rc;

	if ((rc = nvlist_unpack(ioc->shr, ioc->shrlen, &shrlist, 0)) != 0)
		return (rc);

	for (nvp = nvlist_next_nvpair(shrlist, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(shrlist, nvp)) {
		if (nvpair_type(nvp) != DATA_TYPE_NVLIST)
			continue;

		shrname = nvpair_name(nvp);
		ASSERT(shrname);

		if ((rc = smb_kshare_unexport(shrname)) != 0)
			continue;

		ux = kmem_cache_alloc(smb_export.e_cache_unexport, KM_SLEEP);
		(void) strlcpy(ux->us_sharename, shrname, MAXNAMELEN);

		smb_slist_insert_tail(&smb_export.e_unexport_list, ux);
		unexport = B_TRUE;
	}

	nvlist_free(shrlist);

	if (unexport)
		smb_thread_signal(&smb_export.e_unexport_thread);

	return (0);
}

/*
 * This function builds a response for a NetShareEnum RAP request.
 * The list of shares is scanned twice.  In the first pass the total number
 * of shares whose OEM name is shorter than 13 chars (esi->es_ntotal) and
 * the number of shares that fit in the given buffer are calculated.
 * In the second pass the share data is encoded into the buffer.
 *
 * The data associated with each share has two parts: a fixed-size part and
 * a variable-size part, which is the share's comment.  The response buffer
 * is laid out so that the fixed parts for all shares appear first, followed
 * by the comments for those shares (see the layout sketch below), which is
 * why the data cannot be encoded in one pass without unnecessarily
 * complicating the code.
 */
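
/*
 * Response buffer layout assumed by the encoding below (illustration only;
 * sizes are not to scale):
 *
 *   +-----------------+-----------------+-----+---------+---------+-----+
 *   | SHARE_INFO_1 #1 | SHARE_INFO_1 #2 | ... | cmnt #1 | cmnt #2 | ... |
 *   +-----------------+-----------------+-----+---------+---------+-----+
 *   |<------------ infolen ------------>|<-------- cmntlen ---------->|
 *
 * Each fixed-size SHARE_INFO_1 entry carries the offset (cmnt_offs) of its
 * null-terminated comment in the trailing comment area.
 */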
void
smb_kshare_enum(smb_enumshare_info_t *esi)
{
	smb_avl_t *share_avl;
	smb_avl_cursor_t cursor;
	smb_kshare_t *shr;
	int remained;
	uint16_t infolen = 0;
	uint16_t cmntlen = 0;
	uint16_t sharelen;
	uint16_t clen;
	uint32_t cmnt_offs;
	smb_msgbuf_t info_mb;
	smb_msgbuf_t cmnt_mb;
	boolean_t autohome_added = B_FALSE;

	if (!smb_export_isready()) {
		esi->es_ntotal = esi->es_nsent = 0;
		esi->es_datasize = 0;
		return;
	}

	esi->es_ntotal = esi->es_nsent = 0;
	remained = esi->es_bufsize;
	share_avl = &smb_export.e_share_avl;

	/* Do the necessary calculations in the first round */
	smb_avl_iterinit(share_avl, &cursor);

	while ((shr = smb_avl_iterate(share_avl, &cursor)) != NULL) {
		if (shr->shr_oemname == NULL) {
			smb_avl_release(share_avl, shr);
			continue;
		}

		if ((shr->shr_flags & SMB_SHRF_AUTOHOME) && !autohome_added) {
			if (esi->es_posix_uid == shr->shr_uid) {
				autohome_added = B_TRUE;
			} else {
				smb_avl_release(share_avl, shr);
				continue;
			}
		}

		esi->es_ntotal++;

		if (remained <= 0) {
			smb_avl_release(share_avl, shr);
			continue;
		}

		clen = strlen(shr->shr_cmnt) + 1;
		sharelen = SHARE_INFO_1_SIZE + clen;

		if (sharelen <= remained) {
			infolen += SHARE_INFO_1_SIZE;
			cmntlen += clen;
		}

		remained -= sharelen;
		smb_avl_release(share_avl, shr);
	}

	esi->es_datasize = infolen + cmntlen;

	smb_msgbuf_init(&info_mb, (uint8_t *)esi->es_buf, infolen, 0);
	smb_msgbuf_init(&cmnt_mb, (uint8_t *)esi->es_buf + infolen, cmntlen, 0);
	cmnt_offs = infolen;

	/* Encode the data in the second round */
	smb_avl_iterinit(share_avl, &cursor);
	autohome_added = B_FALSE;

	while ((shr = smb_avl_iterate(share_avl, &cursor)) != NULL) {
		if (shr->shr_oemname == NULL) {
			smb_avl_release(share_avl, shr);
			continue;
		}

		if ((shr->shr_flags & SMB_SHRF_AUTOHOME) && !autohome_added) {
			if (esi->es_posix_uid == shr->shr_uid) {
				autohome_added = B_TRUE;
			} else {
				smb_avl_release(share_avl, shr);
				continue;
			}
		}

		if (smb_msgbuf_encode(&info_mb, "13c.wl",
		    shr->shr_oemname, shr->shr_type, cmnt_offs) < 0) {
			smb_avl_release(share_avl, shr);
			break;
		}

		if (smb_msgbuf_encode(&cmnt_mb, "s", shr->shr_cmnt) < 0) {
			smb_avl_release(share_avl, shr);
			break;
		}

		cmnt_offs += strlen(shr->shr_cmnt) + 1;
		esi->es_nsent++;

		smb_avl_release(share_avl, shr);
	}

	smb_msgbuf_term(&info_mb);
	smb_msgbuf_term(&cmnt_mb);
}

/*
 * Looks up the given share and returns a pointer to its definition
 * if it is found.  A hold is taken on the object before the pointer
 * is returned, in which case the caller MUST always call
 * smb_kshare_release().  See the usage sketch after
 * smb_kshare_release() below.
 */
smb_kshare_t *
smb_kshare_lookup(const char *shrname)
{
	smb_kshare_t key;
	smb_kshare_t *shr;

	ASSERT(shrname);

	if (!smb_export_isready())
		return (NULL);

	key.shr_name = (char *)shrname;
	shr = smb_avl_lookup(&smb_export.e_share_avl, &key);

	return (shr);
}

/*
 * Releases the hold taken on the specified share object
 */
void
smb_kshare_release(smb_kshare_t *shr)
{
	ASSERT(shr);
	ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);

	smb_avl_release(&smb_export.e_share_avl, shr);
}
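
/*
 * Illustrative sketch (not part of the original file): the lookup/release
 * pattern expected by the two functions above.  The function name and the
 * use of shr_path are assumptions made purely for illustration.
 */
#if 0
static int
smb_kshare_print_path(const char *shrname)
{
	smb_kshare_t *shr;

	if ((shr = smb_kshare_lookup(shrname)) == NULL)
		return (ENOENT);

	/* The hold keeps 'shr' valid until it is released. */
	cmn_err(CE_NOTE, "share %s -> %s", shr->shr_name, shr->shr_path);

	smb_kshare_release(shr);
	return (0);
}
#endif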


/*
 * Adds the given share.  If the share is a disk share, smb_vfs_hold()
 * is invoked to ensure that there is a hold on the corresponding
 * file system before the share is added to the shares AVL.
 *
 * If the share is an Autohome share and it is already in the AVL,
 * only its reference count is incremented.
 */
static int
smb_kshare_export(smb_kshare_t *shr)
{
	smb_avl_t	*share_avl;
	smb_kshare_t	*auto_shr;
	vnode_t		*vp;
	int		rc = 0;

	share_avl = &smb_export.e_share_avl;

	if (!STYPE_ISDSK(shr->shr_type)) {
		if ((rc = smb_avl_add(share_avl, shr)) != 0) {
			cmn_err(CE_WARN, "export[%s]: failed caching (%d)",
			    shr->shr_name, rc);
		}

		return (rc);
	}

	if ((auto_shr = smb_avl_lookup(share_avl, shr)) != NULL) {
		if ((auto_shr->shr_flags & SMB_SHRF_AUTOHOME) == 0) {
			smb_avl_release(share_avl, auto_shr);
			return (EEXIST);
		}

		mutex_enter(&auto_shr->shr_mutex);
		auto_shr->shr_autocnt++;
		mutex_exit(&auto_shr->shr_mutex);
		smb_avl_release(share_avl, auto_shr);
		return (0);
	}

	if ((rc = smb_server_sharevp(shr->shr_path, &vp)) != 0) {
		cmn_err(CE_WARN, "export[%s(%s)]: failed obtaining vnode (%d)",
		    shr->shr_name, shr->shr_path, rc);
		return (rc);
	}

	if ((rc = smb_vfs_hold(&smb_export, vp->v_vfsp)) == 0) {
		if ((rc = smb_avl_add(share_avl, shr)) != 0) {
			cmn_err(CE_WARN, "export[%s]: failed caching (%d)",
			    shr->shr_name, rc);
			smb_vfs_rele(&smb_export, vp->v_vfsp);
		}
	} else {
		cmn_err(CE_WARN, "export[%s(%s)]: failed holding VFS (%d)",
		    shr->shr_name, shr->shr_path, rc);
	}

	VN_RELE(vp);
	return (rc);
}

/*
 * Removes the share specified by 'shrname' from the AVL
 * tree of the given server if it's there.
 *
 * If the share is an Autohome share, the autohome count
 * is decremented and the share is only removed if the
 * count goes to zero.
 *
 * If the share is a disk share, the hold on the corresponding
 * file system is released before removing the share from
 * the AVL tree.
 */
static int
smb_kshare_unexport(const char *shrname)
{
	smb_avl_t	*share_avl;
	smb_kshare_t	key;
	smb_kshare_t	*shr;
	vnode_t		*vp;
	int		rc;
	boolean_t	auto_unexport;

	share_avl = &smb_export.e_share_avl;

	key.shr_name = (char *)shrname;
	if ((shr = smb_avl_lookup(share_avl, &key)) == NULL)
		return (ENOENT);

	if ((shr->shr_flags & SMB_SHRF_AUTOHOME) != 0) {
		mutex_enter(&shr->shr_mutex);
		shr->shr_autocnt--;
		auto_unexport = (shr->shr_autocnt == 0);
		mutex_exit(&shr->shr_mutex);
		if (!auto_unexport) {
			smb_avl_release(share_avl, shr);
			return (0);
		}
	}

	if (STYPE_ISDSK(shr->shr_type)) {
		if ((rc = smb_server_sharevp(shr->shr_path, &vp)) != 0) {
			smb_avl_release(share_avl, shr);
			cmn_err(CE_WARN, "unexport[%s]: failed obtaining vnode"
			    " (%d)", shrname, rc);
			return (rc);
		}

		smb_vfs_rele(&smb_export, vp->v_vfsp);
		VN_RELE(vp);
	}

	smb_avl_remove(share_avl, shr);
	smb_avl_release(share_avl, shr);

	return (0);
}

/*
 * Exports IPC$ or Admin shares
 */
static int
smb_kshare_export_trans(char *name, char *path, char *cmnt)
{
	smb_kshare_t *shr;

	ASSERT(name);
	ASSERT(path);

	shr = kmem_cache_alloc(smb_export.e_cache_share, KM_SLEEP);
	bzero(shr, sizeof (smb_kshare_t));

	shr->shr_magic = SMB_SHARE_MAGIC;
	shr->shr_cache = smb_export.e_cache_share;
	shr->shr_refcnt = 1;
	shr->shr_flags = SMB_SHRF_TRANS | smb_kshare_is_admin(shr->shr_name);
	if (strcasecmp(name, "IPC$") == 0)
		shr->shr_type = STYPE_IPC;
	else
		shr->shr_type = STYPE_DISKTREE;

	shr->shr_type |= smb_kshare_is_special(shr->shr_name);

	shr->shr_name = smb_mem_strdup(name);
	if (path)
		shr->shr_path = smb_mem_strdup(path);
	if (cmnt)
		shr->shr_cmnt = smb_mem_strdup(cmnt);
	shr->shr_oemname = smb_kshare_oemname(name);

	return (smb_kshare_export(shr));
}

/*
 * Decodes share information in an nvlist format into a smb_kshare_t
 * structure.
 *
 * This is a temporary function and will be replaced by functions
 * provided by libsharev2 code after it's available.
 */
static smb_kshare_t *
smb_kshare_decode(nvlist_t *share)
{
	smb_kshare_t tmp;
	smb_kshare_t *shr;
	nvlist_t *smb;
	char *csc_name = NULL;
	int rc;

	ASSERT(share);

	bzero(&tmp, sizeof (smb_kshare_t));

	rc = nvlist_lookup_string(share, "name", &tmp.shr_name);
	rc |= nvlist_lookup_string(share, "path", &tmp.shr_path);
	(void) nvlist_lookup_string(share, "desc", &tmp.shr_cmnt);

	ASSERT(tmp.shr_name && tmp.shr_path);

	rc |= nvlist_lookup_nvlist(share, "smb", &smb);
	if (rc != 0) {
		cmn_err(CE_WARN, "kshare: failed looking up SMB properties"
		    " (%d)", rc);
		return (NULL);
	}

	(void) nvlist_lookup_string(smb, SHOPT_AD_CONTAINER,
	    &tmp.shr_container);
	(void) nvlist_lookup_string(smb, SHOPT_NONE, &tmp.shr_access_none);
	(void) nvlist_lookup_string(smb, SHOPT_RO, &tmp.shr_access_ro);
	(void) nvlist_lookup_string(smb, SHOPT_RW, &tmp.shr_access_rw);

	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_ABE, SMB_SHRF_ABE);
	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_CATIA,
	    SMB_SHRF_CATIA);
	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_GUEST,
	    SMB_SHRF_GUEST_OK);
	tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_DFSROOT,
	    SMB_SHRF_DFSROOT);
	tmp.shr_flags |= smb_kshare_decode_bool(smb, "Autohome",
	    SMB_SHRF_AUTOHOME);

	if ((tmp.shr_flags & SMB_SHRF_AUTOHOME) == SMB_SHRF_AUTOHOME) {
		rc = nvlist_lookup_uint32(smb, "uid", &tmp.shr_uid);
		rc |= nvlist_lookup_uint32(smb, "gid", &tmp.shr_gid);
		if (rc != 0) {
			cmn_err(CE_WARN, "kshare: failed looking up UID/GID"
			    " (%d)", rc);
			return (NULL);
		}
	}

	(void) nvlist_lookup_string(smb, SHOPT_CSC, &csc_name);
	smb_kshare_csc_flags(&tmp, csc_name);

	shr = kmem_cache_alloc(smb_export.e_cache_share, KM_SLEEP);
	bzero(shr, sizeof (smb_kshare_t));

	shr->shr_magic = SMB_SHARE_MAGIC;
	shr->shr_cache = smb_export.e_cache_share;
	shr->shr_refcnt = 1;

	shr->shr_name = smb_mem_strdup(tmp.shr_name);
	shr->shr_path = smb_mem_strdup(tmp.shr_path);
	if (tmp.shr_cmnt)
		shr->shr_cmnt = smb_mem_strdup(tmp.shr_cmnt);
	if (tmp.shr_container)
		shr->shr_container = smb_mem_strdup(tmp.shr_container);
	if (tmp.shr_access_none)
		shr->shr_access_none = smb_mem_strdup(tmp.shr_access_none);
	if (tmp.shr_access_ro)
		shr->shr_access_ro = smb_mem_strdup(tmp.shr_access_ro);
	if (tmp.shr_access_rw)
		shr->shr_access_rw = smb_mem_strdup(tmp.shr_access_rw);

	shr->shr_oemname = smb_kshare_oemname(shr->shr_name);
	shr->shr_flags = tmp.shr_flags | smb_kshare_is_admin(shr->shr_name);
	shr->shr_type = STYPE_DISKTREE | smb_kshare_is_special(shr->shr_name);

	shr->shr_uid = tmp.shr_uid;
	shr->shr_gid = tmp.shr_gid;

	if ((shr->shr_flags & SMB_SHRF_AUTOHOME) == SMB_SHRF_AUTOHOME)
		shr->shr_autocnt = 1;

	return (shr);
}
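
/*
 * Summary of the per-share nvlist consumed by smb_kshare_decode() above
 * (derived from the lookups in that function):
 *
 *   "name"  (string)  share name                          - required
 *   "path"  (string)  share path                          - required
 *   "desc"  (string)  share comment                       - optional
 *   "smb"   (nvlist)  SMB share properties                - required
 *       SHOPT_AD_CONTAINER, SHOPT_NONE, SHOPT_RO, SHOPT_RW  (strings)
 *       SHOPT_ABE, SHOPT_CATIA, SHOPT_GUEST, SHOPT_DFSROOT,
 *       "Autohome"                                        ("true"/"false")
 *       "uid", "gid"  (uint32, required for Autohome shares)
 *       SHOPT_CSC     ("disabled", "manual", "auto" or "vdo")
 */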

#if 0
static void
smb_kshare_log(smb_kshare_t *shr)
{
	cmn_err(CE_NOTE, "Share info:");
	cmn_err(CE_NOTE, "\tname: %s", (shr->shr_name) ? shr->shr_name : "");
	cmn_err(CE_NOTE, "\tpath: %s", (shr->shr_path) ? shr->shr_path : "");
	cmn_err(CE_NOTE, "\tcmnt: (%s)",
	    (shr->shr_cmnt) ? shr->shr_cmnt : "NULL");
	cmn_err(CE_NOTE, "\toemname: (%s)",
	    (shr->shr_oemname) ? shr->shr_oemname : "NULL");
	cmn_err(CE_NOTE, "\tflags: %X", shr->shr_flags);
	cmn_err(CE_NOTE, "\ttype: %d", shr->shr_type);
}
#endif

/*
 * Compare function used by shares AVL
 */
static int
smb_kshare_cmp(const void *p1, const void *p2)
{
	smb_kshare_t *shr1 = (smb_kshare_t *)p1;
	smb_kshare_t *shr2 = (smb_kshare_t *)p2;
	int rc;

	ASSERT(shr1);
	ASSERT(shr1->shr_name);

	ASSERT(shr2);
	ASSERT(shr2->shr_name);

	rc = smb_strcasecmp(shr1->shr_name, shr2->shr_name, 0);

	if (rc < 0)
		return (-1);

	if (rc > 0)
		return (1);

	return (0);
}

/*
 * This function is called by the smb_avl routines whenever
 * there is a need to take a hold on a share structure inside
 * the AVL.
 */
static void
smb_kshare_hold(const void *p)
{
	smb_kshare_t *shr = (smb_kshare_t *)p;

	ASSERT(shr);
	ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);

	mutex_enter(&shr->shr_mutex);
	shr->shr_refcnt++;
	mutex_exit(&shr->shr_mutex);
}

/*
 * This function must be called by the smb_avl routines whenever
 * smb_kshare_hold() has been called and the hold needs to be released.
 */
static boolean_t
smb_kshare_rele(const void *p)
{
	smb_kshare_t *shr = (smb_kshare_t *)p;
	boolean_t destroy;

	ASSERT(shr);
	ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);

	mutex_enter(&shr->shr_mutex);
	ASSERT(shr->shr_refcnt > 0);
	shr->shr_refcnt--;
	destroy = (shr->shr_refcnt == 0);
	mutex_exit(&shr->shr_mutex);

	return (destroy);
}

/*
 * Frees all the memory allocated for the given
 * share structure and returns the structure to
 * the share kmem cache.
 */
static void
smb_kshare_destroy(void *p)
{
	smb_kshare_t *shr = (smb_kshare_t *)p;

	ASSERT(shr);
	ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);

	smb_mem_free(shr->shr_name);
	smb_mem_free(shr->shr_path);
	smb_mem_free(shr->shr_cmnt);
	smb_mem_free(shr->shr_container);
	smb_mem_free(shr->shr_oemname);
	smb_mem_free(shr->shr_access_none);
	smb_mem_free(shr->shr_access_ro);
	smb_mem_free(shr->shr_access_rw);

	kmem_cache_free(shr->shr_cache, shr);
}


/*
 * Generate an OEM name for the given share name.  If the name is
 * shorter than 13 bytes the oemname will be returned; otherwise NULL
 * is returned.
 */
static char *
smb_kshare_oemname(const char *shrname)
{
	smb_wchar_t *unibuf;
	char *oem_name;
	int length;

	length = strlen(shrname) + 1;

	oem_name = smb_mem_alloc(length);
	unibuf = smb_mem_alloc(length * sizeof (smb_wchar_t));

	(void) smb_mbstowcs(unibuf, shrname, length);

	if (ucstooem(oem_name, unibuf, length, OEM_CPG_850) == 0)
		(void) strcpy(oem_name, shrname);

	smb_mem_free(unibuf);

	if (strlen(oem_name) + 1 > SMB_SHARE_OEMNAME_MAX) {
		smb_mem_free(oem_name);
		return (NULL);
	}

	return (oem_name);
}

/*
 * Special share reserved for interprocess communication (IPC$) or
 * remote administration of the server (ADMIN$). Can also refer to
 * administrative shares such as C$, D$, E$, and so forth.
 */
static int
smb_kshare_is_special(const char *sharename)
{
	int len;

	if (sharename == NULL)
		return (0);

	if ((len = strlen(sharename)) == 0)
		return (0);

	if (sharename[len - 1] == '$')
		return (STYPE_SPECIAL);

	return (0);
}

/*
 * Check whether or not this is a default admin share: C$, D$ etc.
 */
static boolean_t
smb_kshare_is_admin(const char *sharename)
{
	if (sharename == NULL)
		return (B_FALSE);

	if (strlen(sharename) == 2 &&
	    smb_isalpha(sharename[0]) && sharename[1] == '$') {
		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Decodes the given boolean share option.
 * If the option is present in the nvlist and its value is true,
 * returns the corresponding flag value; otherwise returns 0.
 */
static uint32_t
smb_kshare_decode_bool(nvlist_t *nvl, const char *propname, uint32_t flag)
{
	char *boolp;

	if (nvlist_lookup_string(nvl, propname, &boolp) == 0)
		if (strcasecmp(boolp, "true") == 0)
			return (flag);

	return (0);
}

/*
 * Map a client-side caching (CSC) option to the appropriate share
 * flag.  Only one option is allowed; an error will be logged if
 * multiple options have been specified.  We don't need to do anything
 * about multiple values here because the SRVSVC will not recognize
 * a value containing multiple flags and will return the default value.
 *
 * If the option value is not recognized, it will be ignored: invalid
 * values will typically be caught and rejected by sharemgr.
 */
static void
smb_kshare_csc_flags(smb_kshare_t *shr, const char *value)
{
	int i;
	static struct {
		char *value;
		uint32_t flag;
	} cscopt[] = {
		{ "disabled",	SMB_SHRF_CSC_DISABLED },
		{ "manual",	SMB_SHRF_CSC_MANUAL },
		{ "auto",	SMB_SHRF_CSC_AUTO },
		{ "vdo",	SMB_SHRF_CSC_VDO }
	};

	if (value == NULL)
		return;

	for (i = 0; i < (sizeof (cscopt) / sizeof (cscopt[0])); ++i) {
		if (strcasecmp(value, cscopt[i].value) == 0) {
			shr->shr_flags |= cscopt[i].flag;
			break;
		}
	}

	switch (shr->shr_flags & SMB_SHRF_CSC_MASK) {
	case 0:
	case SMB_SHRF_CSC_DISABLED:
	case SMB_SHRF_CSC_MANUAL:
	case SMB_SHRF_CSC_AUTO:
	case SMB_SHRF_CSC_VDO:
		break;

	default:
		cmn_err(CE_NOTE, "csc option conflict: 0x%08x",
		    shr->shr_flags & SMB_SHRF_CSC_MASK);
		break;
	}
}

/*
 * This function processes the unexport event list and disconnects shares
 * asynchronously.  The function executes as a zone-specific thread.
 *
 * The server arg passed in is safe to use without a reference count, because
 * the server cannot be deleted until smb_thread_stop()/destroy() return,
 * which is also when the thread exits.
 */
/*ARGSUSED*/
static void
smb_kshare_unexport_thread(smb_thread_t *thread, void *arg)
{
	smb_unshare_t	*ux;

	while (smb_thread_continue(thread)) {
		while ((ux = list_head(&smb_export.e_unexport_list.sl_list))
		    != NULL) {
			smb_slist_remove(&smb_export.e_unexport_list, ux);
			(void) smb_server_unshare(ux->us_sharename);
			kmem_cache_free(smb_export.e_cache_unexport, ux);
		}
	}
}

static boolean_t
smb_export_isready(void)
{
	boolean_t ready;

	mutex_enter(&smb_export.e_mutex);
	ready = smb_export.e_ready;
	mutex_exit(&smb_export.e_mutex);

	return (ready);
}

/*
 * Return 0 upon success. Otherwise > 0
 */
static int
smb_kshare_chk_dsrv_status(int opcode, smb_dr_ctx_t *dec_ctx)
{
	int status = smb_dr_get_int32(dec_ctx);
	int err;

	switch (status) {
	case SMB_SHARE_DSUCCESS:
		return (0);

	case SMB_SHARE_DERROR:
		err = smb_dr_get_uint32(dec_ctx);
		cmn_err(CE_WARN, "%d: Encountered door server error %d",
		    opcode, err);
		(void) smb_dr_decode_finish(dec_ctx);
		return (err);
	}

	ASSERT(0);
	return (EINVAL);
}