/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 * Copyright 2020-2023 RackTop Systems, Inc.
 */

#include <smbsrv/smb_door.h>
#include <smbsrv/smb_ktypes.h>
#include <smbsrv/smb2_kproto.h>
#include <smbsrv/smb_kstat.h>

typedef struct smb_unshare {
        list_node_t     us_lnd;
        char            us_sharename[MAXNAMELEN];
} smb_unshare_t;

static kmem_cache_t     *smb_kshare_cache_share;
static kmem_cache_t     *smb_kshare_cache_unexport;

static int smb_kshare_cmp(const void *, const void *);
static void smb_kshare_hold(const void *);
static boolean_t smb_kshare_rele(const void *);
static void smb_kshare_destroy(void *);
static char *smb_kshare_oemname(const char *);
static int smb_kshare_is_special(const char *);
static int smb_kshare_is_admin(const char *);
static smb_kshare_t *smb_kshare_decode(nvlist_t *);
static uint32_t smb_kshare_decode_bool(nvlist_t *, const char *, uint32_t);
static void smb_kshare_unexport_thread(smb_thread_t *, void *);
static int smb_kshare_export(smb_server_t *, smb_kshare_t *);
static int smb_kshare_unexport(smb_server_t *, const char *);
static int smb_kshare_export_trans(smb_server_t *, char *, char *, char *);
static void smb_kshare_csc_flags(smb_kshare_t *, const char *);

static boolean_t smb_export_isready(smb_server_t *);

#ifdef _KERNEL
static int smb_kshare_chk_dsrv_status(int, smb_dr_ctx_t *);
#endif /* _KERNEL */

static const smb_avl_nops_t smb_kshare_avlops = {
        smb_kshare_cmp,
        smb_kshare_hold,
        smb_kshare_rele,
        smb_kshare_destroy
};

#ifdef _KERNEL
/*
 * This function is not multithread-safe. The caller must ensure that
 * only one thread calls it at a time.
 */
door_handle_t
smb_kshare_door_init(int door_id)
{
        return (door_ki_lookup(door_id));
}

/*
 * This function is not multithread-safe. The caller must ensure that
 * only one thread calls it at a time.
 */
void
smb_kshare_door_fini(door_handle_t dhdl)
{
        if (dhdl)
                door_ki_rele(dhdl);
}

/*
 * This is a special interface used by ZFS to cause a share to be
 * added or removed.
 *
 * arg is either an smb_share_t or a share name from userspace.
 * It needs to be copied into the kernel: it is an smb_share_t
 * for add operations and a share name for delete operations.
 */
int
smb_kshare_upcall(door_handle_t dhdl, void *arg, boolean_t add_share)
{
        door_arg_t      doorarg = { 0 };
        char            *buf = NULL;
        char            *str = NULL;
        int             error;
        int             rc;
        unsigned int    used;
        smb_dr_ctx_t    *dec_ctx;
        smb_dr_ctx_t    *enc_ctx;
        smb_share_t     *lmshare = NULL;
        int             opcode;

        opcode = (add_share) ? SMB_SHROP_ADD : SMB_SHROP_DELETE;

        buf = kmem_alloc(SMB_SHARE_DSIZE, KM_SLEEP);
        enc_ctx = smb_dr_encode_start(buf, SMB_SHARE_DSIZE);
        smb_dr_put_uint32(enc_ctx, opcode);

        switch (opcode) {
        case SMB_SHROP_ADD:
                lmshare = kmem_alloc(sizeof (smb_share_t), KM_SLEEP);
                error = xcopyin(arg, lmshare, sizeof (smb_share_t));
                if (error != 0) {
                        kmem_free(lmshare, sizeof (smb_share_t));
                        kmem_free(buf, SMB_SHARE_DSIZE);
                        return (error);
                }
                smb_dr_put_share(enc_ctx, lmshare);
                break;

        case SMB_SHROP_DELETE:
                str = kmem_alloc(MAXPATHLEN, KM_SLEEP);
                error = copyinstr(arg, str, MAXPATHLEN, NULL);
                if (error != 0) {
                        kmem_free(str, MAXPATHLEN);
                        kmem_free(buf, SMB_SHARE_DSIZE);
                        return (error);
                }
                smb_dr_put_string(enc_ctx, str);
                kmem_free(str, MAXPATHLEN);
                break;
        }

        if ((error = smb_dr_encode_finish(enc_ctx, &used)) != 0) {
                kmem_free(buf, SMB_SHARE_DSIZE);
                if (lmshare)
                        kmem_free(lmshare, sizeof (smb_share_t));
                return (NERR_InternalError);
        }

        doorarg.data_ptr = buf;
        doorarg.data_size = used;
        doorarg.rbuf = buf;
        doorarg.rsize = SMB_SHARE_DSIZE;

        error = door_ki_upcall_limited(dhdl, &doorarg, NULL, SIZE_MAX, 0);

        if (error) {
                kmem_free(buf, SMB_SHARE_DSIZE);
                if (lmshare)
                        kmem_free(lmshare, sizeof (smb_share_t));
                return (error);
        }

        dec_ctx = smb_dr_decode_start(doorarg.data_ptr, doorarg.data_size);
        if (smb_kshare_chk_dsrv_status(opcode, dec_ctx) != 0) {
                kmem_free(buf, SMB_SHARE_DSIZE);
                if (lmshare)
                        kmem_free(lmshare, sizeof (smb_share_t));
                return (NERR_InternalError);
        }

        rc = smb_dr_get_uint32(dec_ctx);
        if (opcode == SMB_SHROP_ADD)
                smb_dr_get_share(dec_ctx, lmshare);

        if (smb_dr_decode_finish(dec_ctx))
                rc = NERR_InternalError;

        kmem_free(buf, SMB_SHARE_DSIZE);
        if (lmshare)
                kmem_free(lmshare, sizeof (smb_share_t));

        return ((rc == NERR_DuplicateShare && add_share) ? 0 : rc);
}
#endif /* _KERNEL */

/*
 * Executes the map and unmap commands for shares.
 */
int
smb_kshare_exec(smb_server_t *sv, smb_shr_execinfo_t *execinfo)
{
        int exec_rc = 0;

        (void) smb_kdoor_upcall(sv, SMB_DR_SHR_EXEC,
            execinfo, smb_shr_execinfo_xdr, &exec_rc, xdr_int);

        return (exec_rc);
}

/*
 * Obtains any host access restriction on the specified
 * share for the given host (ipaddr) by calling smbd.
 */
uint32_t
smb_kshare_hostaccess(smb_kshare_t *shr, smb_session_t *session)
{
        smb_shr_hostaccess_query_t req;
        smb_inaddr_t *ipaddr = &session->ipaddr;
        uint32_t host_access = SMB_SHRF_ACC_OPEN;
        uint32_t flag = SMB_SHRF_ACC_OPEN;
        uint32_t access;

        if (smb_inet_iszero(ipaddr))
                return (ACE_ALL_PERMS);

        if ((shr->shr_access_none == NULL || *shr->shr_access_none == '\0') &&
            (shr->shr_access_ro == NULL || *shr->shr_access_ro == '\0') &&
            (shr->shr_access_rw == NULL || *shr->shr_access_rw == '\0'))
                return (ACE_ALL_PERMS);

        if (shr->shr_access_none != NULL)
                flag |= SMB_SHRF_ACC_NONE;
        if (shr->shr_access_ro != NULL)
                flag |= SMB_SHRF_ACC_RO;
        if (shr->shr_access_rw != NULL)
                flag |= SMB_SHRF_ACC_RW;

        req.shq_none = shr->shr_access_none;
        req.shq_ro = shr->shr_access_ro;
        req.shq_rw = shr->shr_access_rw;
        req.shq_flag = flag;
        req.shq_ipaddr = *ipaddr;

        (void) smb_kdoor_upcall(session->s_server, SMB_DR_SHR_HOSTACCESS,
            &req, smb_shr_hostaccess_query_xdr, &host_access, xdr_uint32_t);

        switch (host_access) {
        case SMB_SHRF_ACC_RO:
                access = ACE_ALL_PERMS & ~ACE_ALL_WRITE_PERMS;
                break;
        case SMB_SHRF_ACC_OPEN:
        case SMB_SHRF_ACC_RW:
                access = ACE_ALL_PERMS;
                break;
        case SMB_SHRF_ACC_NONE:
        default:
                access = 0;
        }

        return (access);
}

/*
 * This function is called when smb_server_t is created, which means
 * the SMB service is ready for exporting SMB shares.
 */
void
smb_export_start(smb_server_t *sv)
{
        mutex_enter(&sv->sv_export.e_mutex);
        if (sv->sv_export.e_ready) {
                mutex_exit(&sv->sv_export.e_mutex);
                return;
        }

        sv->sv_export.e_ready = B_TRUE;
        mutex_exit(&sv->sv_export.e_mutex);

        smb_avl_create(&sv->sv_export.e_share_avl, sizeof (smb_kshare_t),
            offsetof(smb_kshare_t, shr_link), &smb_kshare_avlops);

        (void) smb_kshare_export_trans(sv, "IPC$", "IPC$", "Remote IPC");
        (void) smb_kshare_export_trans(sv, "c$", SMB_CVOL, "Default Share");
        (void) smb_kshare_export_trans(sv, "vss$", SMB_VSS, "VSS");
}

/*
 * This function is called when smb_server_t goes away, which means
 * SMB shares should no longer be made available to clients.
 */
void
smb_export_stop(smb_server_t *sv)
{
        mutex_enter(&sv->sv_export.e_mutex);
        if (!sv->sv_export.e_ready) {
                mutex_exit(&sv->sv_export.e_mutex);
                return;
        }
        sv->sv_export.e_ready = B_FALSE;
        mutex_exit(&sv->sv_export.e_mutex);

        smb_avl_destroy(&sv->sv_export.e_share_avl);
}

/*
 * Global initialization: create the kmem caches used for share and
 * unexport request structures.
 */
void
smb_kshare_g_init(void)
{
        smb_kshare_cache_share = kmem_cache_create("smb_share_cache",
            sizeof (smb_kshare_t), 8, NULL, NULL, NULL, NULL, NULL, 0);

        smb_kshare_cache_unexport = kmem_cache_create("smb_unexport_cache",
            sizeof (smb_unshare_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
}

/*
 * Per-server initialization: set up the list used to queue unexport
 * requests.
 */
void
smb_kshare_init(smb_server_t *sv)
{
        smb_slist_constructor(&sv->sv_export.e_unexport_list,
            sizeof (smb_unshare_t), offsetof(smb_unshare_t, us_lnd));
}
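/*
 * Per-server startup: create and start the thread that processes
 * queued unexport requests.
 */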
int
smb_kshare_start(smb_server_t *sv)
{
        smb_thread_init(&sv->sv_export.e_unexport_thread, "smb_kshare_unexport",
            smb_kshare_unexport_thread, sv, smbsrv_base_pri, sv);

        return (smb_thread_start(&sv->sv_export.e_unexport_thread));
}

/*
 * Per-server stop: stop and destroy the unexport thread.
 */
void
smb_kshare_stop(smb_server_t *sv)
{
        smb_thread_stop(&sv->sv_export.e_unexport_thread);
        smb_thread_destroy(&sv->sv_export.e_unexport_thread);
}

/*
 * Per-server cleanup: free any queued unexport requests and destroy
 * the queue.
 */
void
smb_kshare_fini(smb_server_t *sv)
{
        smb_unshare_t *ux;

        while ((ux = list_head(&sv->sv_export.e_unexport_list.sl_list))
            != NULL) {
                smb_slist_remove(&sv->sv_export.e_unexport_list, ux);
                kmem_cache_free(smb_kshare_cache_unexport, ux);
        }
        smb_slist_destructor(&sv->sv_export.e_unexport_list);
}

/*
 * Global cleanup: destroy the kmem caches created by smb_kshare_g_init().
 */
void
smb_kshare_g_fini(void)
{
        kmem_cache_destroy(smb_kshare_cache_unexport);
        kmem_cache_destroy(smb_kshare_cache_share);
}

/*
 * A list of shares in nvlist format can be sent down
 * from userspace through the IOCTL interface. The nvlist
 * is unpacked here and all the shares in the list will
 * be exported.
 */
int
smb_kshare_export_list(smb_server_t *sv, smb_ioc_share_t *ioc)
{
        nvlist_t        *shrlist = NULL;
        nvlist_t        *share;
        nvpair_t        *nvp;
        smb_kshare_t    *shr;
        char            *shrname;
        int             rc;

        if (!smb_export_isready(sv)) {
                rc = ENOTACTIVE;
                goto out;
        }

        /*
         * Reality check that the nvlist's reported length doesn't exceed the
         * ioctl's total length.  We then assume the nvlist_unpack() will
         * sanity check the nvlist itself.
         */
        if ((ioc->shrlen + offsetof(smb_ioc_share_t, shr)) > ioc->hdr.len) {
                rc = EINVAL;
                goto out;
        }
        rc = nvlist_unpack(ioc->shr, ioc->shrlen, &shrlist, KM_SLEEP);
        if (rc != 0)
                goto out;

        for (nvp = nvlist_next_nvpair(shrlist, NULL); nvp != NULL;
            nvp = nvlist_next_nvpair(shrlist, nvp)) {

                /*
                 * Since this loop can run for a while we want to exit
                 * as soon as the server state is anything but RUNNING
                 * to allow shutdown to proceed.
                 */
                if (sv->sv_state != SMB_SERVER_STATE_RUNNING)
                        goto out;

                if (nvpair_type(nvp) != DATA_TYPE_NVLIST)
                        continue;

                shrname = nvpair_name(nvp);
                ASSERT(shrname);

                if ((rc = nvpair_value_nvlist(nvp, &share)) != 0) {
                        cmn_err(CE_WARN, "export[%s]: failed accessing",
                            shrname);
                        continue;
                }

                if ((shr = smb_kshare_decode(share)) == NULL) {
                        cmn_err(CE_WARN, "export[%s]: failed decoding",
                            shrname);
                        continue;
                }

                /* smb_kshare_export consumes shr so it's not leaked */
                if ((rc = smb_kshare_export(sv, shr)) != 0) {
                        smb_kshare_destroy(shr);
                        continue;
                }
        }
        rc = 0;

out:
        nvlist_free(shrlist);
        return (rc);
}

/*
 * This function is invoked when a share is disabled to disconnect trees
 * and close files.  Cleaning up may involve VOP and/or VFS calls, which
 * may conflict/deadlock with stuck threads if something is amiss with the
 * file system.  Queueing the request for asynchronous processing allows the
 * call to return immediately so that, if the unshare is being done in the
 * context of a forced unmount, the forced unmount will always be able to
 * proceed (unblocking stuck I/O and eventually allowing all blocked unshare
 * processes to complete).
 *
 * The path lookup to find the root vnode of the VFS in question and the
 * release of this vnode are done synchronously prior to any associated
 * unmount.  Doing these asynchronous to an associated unmount could run
 * the risk of a spurious EBUSY for a standard unmount or an EIO during
 * the path lookup due to a forced unmount finishing first.
 */
int
smb_kshare_unexport_list(smb_server_t *sv, smb_ioc_share_t *ioc)
{
        smb_unshare_t   *ux;
        nvlist_t        *shrlist = NULL;
        nvpair_t        *nvp;
        boolean_t       unexport = B_FALSE;
        char            *shrname;
        int             rc;

        /*
         * Reality check that the nvlist's reported length doesn't exceed the
         * ioctl's total length.  We then assume the nvlist_unpack() will
         * sanity check the nvlist itself.
         */
        if ((ioc->shrlen + offsetof(smb_ioc_share_t, shr)) > ioc->hdr.len) {
                rc = EINVAL;
                goto out;
        }
        if ((rc = nvlist_unpack(ioc->shr, ioc->shrlen, &shrlist, 0)) != 0)
                goto out;

        for (nvp = nvlist_next_nvpair(shrlist, NULL); nvp != NULL;
            nvp = nvlist_next_nvpair(shrlist, nvp)) {
                if (nvpair_type(nvp) != DATA_TYPE_NVLIST)
                        continue;

                shrname = nvpair_name(nvp);
                ASSERT(shrname);

                if ((rc = smb_kshare_unexport(sv, shrname)) != 0)
                        continue;

                ux = kmem_cache_alloc(smb_kshare_cache_unexport, KM_SLEEP);
                (void) strlcpy(ux->us_sharename, shrname, MAXNAMELEN);

                smb_slist_insert_tail(&sv->sv_export.e_unexport_list, ux);
                unexport = B_TRUE;
        }

        if (unexport)
                smb_thread_signal(&sv->sv_export.e_unexport_thread);
        rc = 0;

out:
        nvlist_free(shrlist);
        return (rc);
}

/*
 * Get properties (currently only shortname enablement)
 * of the specified share.
 */
int
smb_kshare_info(smb_server_t *sv, smb_ioc_shareinfo_t *ioc)
{
        ioc->shortnames = sv->sv_cfg.skc_short_names;

        return (0);
}

/*
 * smb_kshare_access
 *
 * Does this user have access to the share?
 * Returns: 0 (access OK) or errno.
 *
 * SMB users always have VEXEC (traverse) via privileges,
 * so just check for READ or WRITE permissions.
 */
int
smb_kshare_access(smb_server_t *sv, smb_ioc_shareaccess_t *ioc)
{
        smb_user_t      *user = NULL;
        smb_kshare_t    *shr = NULL;
        smb_node_t      *shroot = NULL;
        vnode_t         *vp = NULL;
        int             rc = EACCES;

        shr = smb_kshare_lookup(sv, ioc->shrname);
        if (shr == NULL) {
                rc = ENOENT;
                goto out;
        }
        if ((shroot = shr->shr_root_node) == NULL) {
                /* Only "file" shares have shr_root_node */
                rc = 0;
                goto out;
        }
        vp = shroot->vp;

        user = smb_server_lookup_user(sv, ioc->session_id, ioc->user_id);
        if (user == NULL) {
                rc = EINVAL;
                goto out;
        }
        ASSERT(user->u_cred != NULL);

        rc = smb_vop_access(vp, VREAD, 0, NULL, user->u_cred);
        if (rc != 0)
                rc = smb_vop_access(vp, VWRITE, 0, NULL, user->u_cred);

out:
        if (user != NULL)
                smb_user_release(user);
        if (shr != NULL)
                smb_kshare_release(sv, shr);

        return (rc);
}

/*
 * This function builds a response for a NetShareEnum RAP request.
 * The list of shares is scanned twice.  In the first round, the total
 * number of shares whose OEM name is shorter than 13 chars (esi->es_ntotal)
 * and the number of shares that fit in the given buffer are calculated.
 * In the second round, the share data are encoded into the buffer.
 *
 * The data associated with each share has two parts: a fixed size part and
 * a variable size part, which is the share's comment.  The response buffer
 * is laid out so that the fixed parts for all the shares appear first,
 * followed by the comments for all those shares, which is why the data
 * cannot be encoded in one round without unnecessarily complicating the
 * code.
 */
void
smb_kshare_enum(smb_server_t *sv, smb_enumshare_info_t *esi)
{
        smb_avl_t       *share_avl;
        smb_avl_cursor_t cursor;
        smb_kshare_t    *shr;
        int             remained;
        uint16_t        infolen = 0;
        uint16_t        cmntlen = 0;
        uint16_t        sharelen;
        uint16_t        clen;
        uint32_t        cmnt_offs;
        smb_msgbuf_t    info_mb;
        smb_msgbuf_t    cmnt_mb;
        boolean_t       autohome_added = B_FALSE;

        if (!smb_export_isready(sv)) {
                esi->es_ntotal = esi->es_nsent = 0;
                esi->es_datasize = 0;
                return;
        }

        esi->es_ntotal = esi->es_nsent = 0;
        remained = esi->es_bufsize;
        share_avl = &sv->sv_export.e_share_avl;

        /* Do the necessary calculations in the first round */
        smb_avl_iterinit(share_avl, &cursor);

        while ((shr = smb_avl_iterate(share_avl, &cursor)) != NULL) {
                if (shr->shr_oemname == NULL) {
                        smb_avl_release(share_avl, shr);
                        continue;
                }

                if ((shr->shr_flags & SMB_SHRF_AUTOHOME) && !autohome_added) {
                        if (esi->es_posix_uid == shr->shr_uid) {
                                autohome_added = B_TRUE;
                        } else {
                                smb_avl_release(share_avl, shr);
                                continue;
                        }
                }

                esi->es_ntotal++;

                if (remained <= 0) {
                        smb_avl_release(share_avl, shr);
                        continue;
                }

                clen = strlen(shr->shr_cmnt) + 1;
                sharelen = SHARE_INFO_1_SIZE + clen;

                if (sharelen <= remained) {
                        infolen += SHARE_INFO_1_SIZE;
                        cmntlen += clen;
                }

                remained -= sharelen;
                smb_avl_release(share_avl, shr);
        }

        esi->es_datasize = infolen + cmntlen;

        smb_msgbuf_init(&info_mb, (uint8_t *)esi->es_buf, infolen, 0);
        smb_msgbuf_init(&cmnt_mb, (uint8_t *)esi->es_buf + infolen, cmntlen, 0);
        cmnt_offs = infolen;

        /* Encode the data in the second round */
        smb_avl_iterinit(share_avl, &cursor);
        autohome_added = B_FALSE;

        while ((shr = smb_avl_iterate(share_avl, &cursor)) != NULL) {
                if (shr->shr_oemname == NULL) {
                        smb_avl_release(share_avl, shr);
                        continue;
                }

                if ((shr->shr_flags & SMB_SHRF_AUTOHOME) && !autohome_added) {
                        if (esi->es_posix_uid == shr->shr_uid) {
                                autohome_added = B_TRUE;
                        } else {
                                smb_avl_release(share_avl, shr);
                                continue;
                        }
                }

                if (smb_msgbuf_encode(&info_mb, "13c.wl",
                    shr->shr_oemname, shr->shr_type, cmnt_offs) < 0) {
                        smb_avl_release(share_avl, shr);
                        break;
                }

                if (smb_msgbuf_encode(&cmnt_mb, "s", shr->shr_cmnt) < 0) {
                        smb_avl_release(share_avl, shr);
                        break;
                }

                cmnt_offs += strlen(shr->shr_cmnt) + 1;
                esi->es_nsent++;

                smb_avl_release(share_avl, shr);
        }

        smb_msgbuf_term(&info_mb);
        smb_msgbuf_term(&cmnt_mb);
}

/*
 * Looks up the given share and returns a pointer
 * to its definition if it's found.  A hold is taken
 * on the object before the pointer is returned,
 * in which case the caller MUST always call
 * smb_kshare_release().
 */
smb_kshare_t *
smb_kshare_lookup(smb_server_t *sv, const char *shrname)
{
        smb_kshare_t key;
        smb_kshare_t *shr;

        ASSERT(shrname);

        if (!smb_export_isready(sv))
                return (NULL);

        key.shr_name = (char *)shrname;
        shr = smb_avl_lookup(&sv->sv_export.e_share_avl, &key);
        return (shr);
}

/*
 * Releases the hold taken on the specified share object.
 */
void
smb_kshare_release(smb_server_t *sv, smb_kshare_t *shr)
{
        ASSERT(shr);
        ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);

        smb_avl_release(&sv->sv_export.e_share_avl, shr);
}

/*
 * Add the given share in the specified server.
 * If the share is a disk share, lookup the share path
 * and hold the smb_node_t for the share root.
 *
 * If the share is an Autohome share and it is
 * already in the AVL only a reference count for
 * that share is incremented.
 */
static int
smb_kshare_export(smb_server_t *sv, smb_kshare_t *shr)
{
        smb_avl_t       *share_avl;
        smb_kshare_t    *auto_shr;
        smb_node_t      *snode = NULL;
        int             rc = 0;

        share_avl = &sv->sv_export.e_share_avl;

        if (!STYPE_ISDSK(shr->shr_type)) {
                if ((rc = smb_avl_add(share_avl, shr)) != 0) {
                        cmn_err(CE_WARN, "export[%s]: failed caching (%d)",
                            shr->shr_name, rc);
                }

                return (rc);
        }

        if ((auto_shr = smb_avl_lookup(share_avl, shr)) != NULL) {
                rc = EEXIST;
                if ((auto_shr->shr_flags & SMB_SHRF_AUTOHOME) != 0) {
                        mutex_enter(&auto_shr->shr_mutex);
                        auto_shr->shr_autocnt++;
                        mutex_exit(&auto_shr->shr_mutex);
                        rc = 0;
                }
                smb_avl_release(share_avl, auto_shr);
                return (rc);
        }

        /*
         * Get the root smb_node_t for this share, held.
         * This hold is normally released during AVL destroy,
         * via the element destructor:  smb_kshare_destroy
         */
        rc = smb_server_share_lookup(sv, shr->shr_path, &snode);
        if (rc != 0) {
                cmn_err(CE_WARN, "export[%s(%s)]: lookup failed (%d)",
                    shr->shr_name, shr->shr_path, rc);
                return (rc);
        }

        shr->shr_root_node = snode;
        if ((rc = smb_avl_add(share_avl, shr)) != 0) {
                cmn_err(CE_WARN, "export[%s]: failed caching (%d)",
                    shr->shr_name, rc);
                shr->shr_root_node = NULL;
                smb_node_release(snode);
                return (rc);
        }

        /*
         * For CA shares, find or create the CA handle dir,
         * and (if restarted) import persistent handles.
         */
        if ((shr->shr_flags & SMB_SHRF_CA) != 0) {
                rc = smb2_dh_new_ca_share(sv, shr);
                if (rc != 0) {
                        /* Just make it a non-CA share. */
                        mutex_enter(&shr->shr_mutex);
                        shr->shr_flags &= ~SMB_SHRF_CA;
                        mutex_exit(&shr->shr_mutex);
                        rc = 0;
                }
        }

        return (rc);
}

/*
 * Removes the share specified by 'shrname' from the AVL
 * tree of the given server if it's there.
 *
 * If the share is an Autohome share, the autohome count
 * is decremented and the share is only removed if the
 * count goes to zero.
 *
 * If the share is a disk share, the hold on the corresponding
 * file system is released before removing the share from
 * the AVL tree.
 */
static int
smb_kshare_unexport(smb_server_t *sv, const char *shrname)
{
        smb_avl_t       *share_avl;
        smb_kshare_t    key;
        smb_kshare_t    *shr;
        boolean_t       auto_unexport;

        share_avl = &sv->sv_export.e_share_avl;

        key.shr_name = (char *)shrname;
        if ((shr = smb_avl_lookup(share_avl, &key)) == NULL)
                return (ENOENT);

        if ((shr->shr_flags & SMB_SHRF_AUTOHOME) != 0) {
                mutex_enter(&shr->shr_mutex);
                shr->shr_autocnt--;
                auto_unexport = (shr->shr_autocnt == 0);
                mutex_exit(&shr->shr_mutex);
                if (!auto_unexport) {
                        smb_avl_release(share_avl, shr);
                        return (0);
                }
        }

        smb_avl_remove(share_avl, shr);

        mutex_enter(&shr->shr_mutex);
        shr->shr_flags |= SMB_SHRF_REMOVED;
        mutex_exit(&shr->shr_mutex);

        smb_avl_release(share_avl, shr);

        return (0);
}

/*
 * Exports IPC$ or Admin shares
 */
static int
smb_kshare_export_trans(smb_server_t *sv, char *name, char *path, char *cmnt)
{
        smb_kshare_t *shr;

        ASSERT(name);
        ASSERT(path);

        shr = kmem_cache_alloc(smb_kshare_cache_share, KM_SLEEP);
        bzero(shr, sizeof (smb_kshare_t));

        shr->shr_magic = SMB_SHARE_MAGIC;
        shr->shr_refcnt = 1;
        shr->shr_flags = SMB_SHRF_TRANS | smb_kshare_is_admin(name);
        if (strcasecmp(name, "IPC$") == 0)
                shr->shr_type = STYPE_IPC;
        else
                shr->shr_type = STYPE_DISKTREE;

        shr->shr_type |= smb_kshare_is_special(name);

        shr->shr_name = smb_mem_strdup(name);
        if (path)
                shr->shr_path = smb_mem_strdup(path);
        if (cmnt)
                shr->shr_cmnt = smb_mem_strdup(cmnt);
        shr->shr_oemname = smb_kshare_oemname(name);

        return (smb_kshare_export(sv, shr));
}

/*
 * Decodes share information in an nvlist format into a smb_kshare_t
 * structure.
 *
 * This is a temporary function and will be replaced by functions
 * provided by libsharev2 code after it's available.
 */
static smb_kshare_t *
smb_kshare_decode(nvlist_t *share)
{
        smb_kshare_t tmp;
        smb_kshare_t *shr;
        nvlist_t *smb;
        char *csc_name = NULL, *strbuf = NULL;
        int rc;

        ASSERT(share);

        bzero(&tmp, sizeof (smb_kshare_t));

        rc = nvlist_lookup_string(share, "name", &tmp.shr_name);
        rc |= nvlist_lookup_string(share, "path", &tmp.shr_path);
        (void) nvlist_lookup_string(share, "desc", &tmp.shr_cmnt);

        ASSERT(tmp.shr_name && tmp.shr_path);

        rc |= nvlist_lookup_nvlist(share, "smb", &smb);
        if (rc != 0) {
                cmn_err(CE_WARN, "kshare: failed looking up SMB properties"
                    " (%d)", rc);
                return (NULL);
        }

        rc = nvlist_lookup_uint32(smb, "type", &tmp.shr_type);
        if (rc != 0) {
                cmn_err(CE_WARN, "kshare[%s]: failed getting the share type"
                    " (%d)", tmp.shr_name, rc);
                return (NULL);
        }

        (void) nvlist_lookup_string(smb, SHOPT_AD_CONTAINER,
            &tmp.shr_container);
        (void) nvlist_lookup_string(smb, SHOPT_NONE, &tmp.shr_access_none);
        (void) nvlist_lookup_string(smb, SHOPT_RO, &tmp.shr_access_ro);
        (void) nvlist_lookup_string(smb, SHOPT_RW, &tmp.shr_access_rw);

        tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_ABE, SMB_SHRF_ABE);
        tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_CATIA,
            SMB_SHRF_CATIA);
        tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_GUEST,
            SMB_SHRF_GUEST_OK);
        tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_DFSROOT,
            SMB_SHRF_DFSROOT);
        tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_QUOTAS,
            SMB_SHRF_QUOTAS);
        tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_CA, SMB_SHRF_CA);
        tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_FSO, SMB_SHRF_FSO);
        tmp.shr_flags |= smb_kshare_decode_bool(smb, SHOPT_AUTOHOME,
            SMB_SHRF_AUTOHOME);

        if ((tmp.shr_flags & SMB_SHRF_AUTOHOME) == SMB_SHRF_AUTOHOME) {
                rc = nvlist_lookup_uint32(smb, "uid", &tmp.shr_uid);
                rc |= nvlist_lookup_uint32(smb, "gid", &tmp.shr_gid);
                if (rc != 0) {
                        cmn_err(CE_WARN, "kshare: failed looking up uid/gid"
                            " (%d)", rc);
                        return (NULL);
                }
        }

        (void) nvlist_lookup_string(smb, SHOPT_ENCRYPT, &strbuf);
        smb_cfg_set_require(strbuf, &tmp.shr_encrypt);

        (void) nvlist_lookup_string(smb, SHOPT_CSC, &csc_name);
        smb_kshare_csc_flags(&tmp, csc_name);

        shr = kmem_cache_alloc(smb_kshare_cache_share, KM_SLEEP);
        bzero(shr, sizeof (smb_kshare_t));

        shr->shr_magic = SMB_SHARE_MAGIC;
        shr->shr_refcnt = 1;

        shr->shr_name = smb_mem_strdup(tmp.shr_name);
        shr->shr_path = smb_mem_strdup(tmp.shr_path);
        if (tmp.shr_cmnt)
                shr->shr_cmnt = smb_mem_strdup(tmp.shr_cmnt);
        if (tmp.shr_container)
                shr->shr_container = smb_mem_strdup(tmp.shr_container);
        if (tmp.shr_access_none)
                shr->shr_access_none = smb_mem_strdup(tmp.shr_access_none);
        if (tmp.shr_access_ro)
                shr->shr_access_ro = smb_mem_strdup(tmp.shr_access_ro);
        if (tmp.shr_access_rw)
                shr->shr_access_rw = smb_mem_strdup(tmp.shr_access_rw);

        shr->shr_oemname = smb_kshare_oemname(shr->shr_name);
        shr->shr_flags = tmp.shr_flags | smb_kshare_is_admin(shr->shr_name);
        shr->shr_type = tmp.shr_type | smb_kshare_is_special(shr->shr_name);
        shr->shr_encrypt = tmp.shr_encrypt;

        shr->shr_uid = tmp.shr_uid;
        shr->shr_gid = tmp.shr_gid;

        if ((shr->shr_flags & SMB_SHRF_AUTOHOME) == SMB_SHRF_AUTOHOME)
                shr->shr_autocnt = 1;

        return (shr);
}

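/*
 * Debug helper (normally compiled out): dumps the fields of a share
 * definition to the console.
 */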
#if 0
static void
smb_kshare_log(smb_kshare_t *shr)
{
        cmn_err(CE_NOTE, "Share info:");
        cmn_err(CE_NOTE, "\tname: %s", (shr->shr_name) ? shr->shr_name : "");
        cmn_err(CE_NOTE, "\tpath: %s", (shr->shr_path) ? shr->shr_path : "");
        cmn_err(CE_NOTE, "\tcmnt: (%s)",
            (shr->shr_cmnt) ? shr->shr_cmnt : "NULL");
        cmn_err(CE_NOTE, "\toemname: (%s)",
            (shr->shr_oemname) ? shr->shr_oemname : "NULL");
        cmn_err(CE_NOTE, "\tflags: %X", shr->shr_flags);
        cmn_err(CE_NOTE, "\ttype: %d", shr->shr_type);
}
#endif

/*
 * Compare function used by shares AVL
 */
static int
smb_kshare_cmp(const void *p1, const void *p2)
{
        smb_kshare_t *shr1 = (smb_kshare_t *)p1;
        smb_kshare_t *shr2 = (smb_kshare_t *)p2;
        int rc;

        ASSERT(shr1);
        ASSERT(shr1->shr_name);

        ASSERT(shr2);
        ASSERT(shr2->shr_name);

        rc = smb_strcasecmp(shr1->shr_name, shr2->shr_name, 0);

        if (rc < 0)
                return (-1);

        if (rc > 0)
                return (1);

        return (0);
}

/*
 * This function is called by smb_avl routines whenever
 * there is a need to take a hold on a share structure
 * inside AVL
 */
static void
smb_kshare_hold(const void *p)
{
        smb_kshare_t *shr = (smb_kshare_t *)p;

        ASSERT(shr);
        ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);

        mutex_enter(&shr->shr_mutex);
        shr->shr_refcnt++;
        mutex_exit(&shr->shr_mutex);
}

/*
 * This function must be called by smb_avl routines whenever
 * smb_kshare_hold is called and the hold needs to be released.
 */
static boolean_t
smb_kshare_rele(const void *p)
{
        smb_kshare_t *shr = (smb_kshare_t *)p;
        boolean_t destroy;

        ASSERT(shr);
        ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);

        mutex_enter(&shr->shr_mutex);
        ASSERT(shr->shr_refcnt > 0);
        shr->shr_refcnt--;
        destroy = (shr->shr_refcnt == 0);
        mutex_exit(&shr->shr_mutex);

        return (destroy);
}

/*
 * Frees all the memory allocated for the given
 * share structure. It also removes the structure
 * from the share cache.
 */
static void
smb_kshare_destroy(void *p)
{
        smb_kshare_t *shr = (smb_kshare_t *)p;

        ASSERT(shr);
        ASSERT(shr->shr_magic == SMB_SHARE_MAGIC);

        if (shr->shr_ca_dir != NULL)
                smb_node_release(shr->shr_ca_dir);
        if (shr->shr_root_node)
                smb_node_release(shr->shr_root_node);

        smb_mem_free(shr->shr_name);
        smb_mem_free(shr->shr_path);
        smb_mem_free(shr->shr_cmnt);
        smb_mem_free(shr->shr_container);
        smb_mem_free(shr->shr_oemname);
        smb_mem_free(shr->shr_access_none);
        smb_mem_free(shr->shr_access_ro);
        smb_mem_free(shr->shr_access_rw);

        kmem_cache_free(smb_kshare_cache_share, shr);
}


/*
 * Generate an OEM name for the given share name.  If the name is
 * shorter than 13 bytes the oemname will be returned; otherwise NULL
 * is returned.
 */
static char *
smb_kshare_oemname(const char *shrname)
{
        smb_wchar_t *unibuf;
        char *oem_name;
        int length;

        length = strlen(shrname) + 1;

        oem_name = smb_mem_alloc(length);
        unibuf = smb_mem_alloc(length * sizeof (smb_wchar_t));

        (void) smb_mbstowcs(unibuf, shrname, length);

        if (ucstooem(oem_name, unibuf, length, OEM_CPG_850) == 0)
                (void) strcpy(oem_name, shrname);

        smb_mem_free(unibuf);

        if (strlen(oem_name) + 1 > SMB_SHARE_OEMNAME_MAX) {
                smb_mem_free(oem_name);
                return (NULL);
        }

        return (oem_name);
}

/*
 * Special share reserved for interprocess communication (IPC$) or
 * remote administration of the server (ADMIN$). Can also refer to
 * administrative shares such as C$, D$, E$, and so forth.
 */
static int
smb_kshare_is_special(const char *sharename)
{
        int len;

        if (sharename == NULL)
                return (0);

        if ((len = strlen(sharename)) == 0)
                return (0);

        if (sharename[len - 1] == '$')
                return (STYPE_SPECIAL);

        return (0);
}

/*
 * Check whether or not this is a default admin share: C$, D$ etc.
 */
static int
smb_kshare_is_admin(const char *sharename)
{
        if (sharename == NULL)
                return (0);

        if (strlen(sharename) == 2 &&
            smb_isalpha(sharename[0]) && sharename[1] == '$') {
                return (SMB_SHRF_ADMIN);
        }

        return (0);
}

/*
 * Decodes the given boolean share option.
 * If the option is present in the nvlist and its value is true,
 * returns the corresponding flag value; otherwise returns 0.
 */
static uint32_t
smb_kshare_decode_bool(nvlist_t *nvl, const char *propname, uint32_t flag)
{
        char *boolp;

        if (nvlist_lookup_string(nvl, propname, &boolp) == 0)
                if (strcasecmp(boolp, "true") == 0)
                        return (flag);

        return (0);
}

/*
 * Map a client-side caching (CSC) option to the appropriate share
 * flag.  Only one option is allowed; an error will be logged if
 * multiple options have been specified.  We don't need to do anything
 * about multiple values here because the SRVSVC will not recognize
 * a value containing multiple flags and will return the default value.
 *
 * If the option value is not recognized, it will be ignored: invalid
 * values will typically be caught and rejected by sharemgr.
 */
static void
smb_kshare_csc_flags(smb_kshare_t *shr, const char *value)
{
        int i;
        static struct {
                char *value;
                uint32_t flag;
        } cscopt[] = {
                { "disabled",   SMB_SHRF_CSC_DISABLED },
                { "manual",     SMB_SHRF_CSC_MANUAL },
                { "auto",       SMB_SHRF_CSC_AUTO },
                { "vdo",        SMB_SHRF_CSC_VDO }
        };

        if (value == NULL)
                return;

        for (i = 0; i < (sizeof (cscopt) / sizeof (cscopt[0])); ++i) {
                if (strcasecmp(value, cscopt[i].value) == 0) {
                        shr->shr_flags |= cscopt[i].flag;
                        break;
                }
        }

        switch (shr->shr_flags & SMB_SHRF_CSC_MASK) {
        case 0:
        case SMB_SHRF_CSC_DISABLED:
        case SMB_SHRF_CSC_MANUAL:
        case SMB_SHRF_CSC_AUTO:
        case SMB_SHRF_CSC_VDO:
                break;

        default:
                cmn_err(CE_NOTE, "csc option conflict: 0x%08x",
                    shr->shr_flags & SMB_SHRF_CSC_MASK);
                break;
        }
}

/*
 * This function processes the unexport event list and disconnects shares
 * asynchronously.  The function executes as a zone-specific thread.
 *
 * The server arg passed in is safe to use without a reference count, because
 * the server cannot be deleted until smb_thread_stop()/destroy() return,
 * which is also when the thread exits.
 */
/*ARGSUSED*/
static void
smb_kshare_unexport_thread(smb_thread_t *thread, void *arg)
{
        smb_server_t    *sv = arg;
        smb_unshare_t   *ux;

        while (smb_thread_continue(thread)) {
                while ((ux = list_head(&sv->sv_export.e_unexport_list.sl_list))
                    != NULL) {
                        smb_slist_remove(&sv->sv_export.e_unexport_list, ux);
                        (void) smb_server_unshare(ux->us_sharename);
                        kmem_cache_free(smb_kshare_cache_unexport, ux);
                }
        }
}

static boolean_t
smb_export_isready(smb_server_t *sv)
{
        boolean_t ready;

        mutex_enter(&sv->sv_export.e_mutex);
        ready = sv->sv_export.e_ready;
        mutex_exit(&sv->sv_export.e_mutex);

        return (ready);
}

#ifdef _KERNEL
/*
 * Return 0 upon success.  Otherwise > 0
 */
static int
smb_kshare_chk_dsrv_status(int opcode, smb_dr_ctx_t *dec_ctx)
{
        int status = smb_dr_get_int32(dec_ctx);
        int err;

        switch (status) {
        case SMB_SHARE_DSUCCESS:
                return (0);

        case SMB_SHARE_DERROR:
                err = smb_dr_get_uint32(dec_ctx);
                cmn_err(CE_WARN, "%d: Encountered door server error %d",
                    opcode, err);
                (void) smb_dr_decode_finish(dec_ctx);
                return (err);
        }

        ASSERT(0);
        return (EINVAL);
}
#endif /* _KERNEL */