1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 25 * Copyright (c) 2014, 2017 by Delphix. All rights reserved. 26 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com> 27 * Copyright 2017 Joyent, Inc. 28 * Copyright 2017 RackTop Systems. 29 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association. 30 */ 31 32 /* 33 * Routines to manage ZFS mounts. We separate all the nasty routines that have 34 * to deal with the OS. The following functions are the main entry points -- 35 * they are used by mount and unmount and when changing a filesystem's 36 * mountpoint. 
 *
 * zfs_is_mounted()
 * zfs_mount()
 * zfs_unmount()
 * zfs_unmountall()
 *
 * This file also contains the functions used to manage sharing filesystems via
 * NFS and iSCSI:
 *
 * zfs_is_shared()
 * zfs_share()
 * zfs_unshare()
 *
 * zfs_is_shared_nfs()
 * zfs_is_shared_smb()
 * zfs_share_proto()
 * zfs_shareall();
 * zfs_unshare_nfs()
 * zfs_unshare_smb()
 * zfs_unshareall_nfs()
 * zfs_unshareall_smb()
 * zfs_unshareall()
 * zfs_unshareall_bypath()
 *
 * The following functions are available for pool consumers, and will
 * mount/unmount and share/unshare all datasets within pool:
 *
 * zpool_enable_datasets()
 * zpool_disable_datasets()
 */

#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/dsl_crypt.h>

#include <libzfs.h>

#include "libzfs_impl.h"
#include "libzfs_taskq.h"

#include <libshare.h>
#include <sys/systeminfo.h>
#define	MAXISALEN	257	/* based on sysinfo(2) man page */

static int mount_tq_nthr = 512;	/* taskq threads for multi-threaded mounting */

static void zfs_mount_task(void *);
static int zfs_share_proto(zfs_handle_t *, zfs_share_proto_t *);
zfs_share_type_t zfs_is_shared_proto(zfs_handle_t *, char **,
    zfs_share_proto_t);

/*
 * The share protocols table must be in the same order as the zfs_share_proto_t
 * enum in libzfs_impl.h
 */
typedef struct {
	zfs_prop_t p_prop;	/* dataset property controlling this protocol */
	char *p_name;		/* protocol name as recorded in sharetab */
	int p_share_err;	/* libzfs error code for a failed share */
	int p_unshare_err;	/* libzfs error code for a failed unshare */
} proto_table_t;

proto_table_t proto_table[PROTO_END] = {
	{ZFS_PROP_SHARENFS, "nfs", EZFS_SHARENFSFAILED, EZFS_UNSHARENFSFAILED},
	{ZFS_PROP_SHARESMB, "smb", EZFS_SHARESMBFAILED, EZFS_UNSHARESMBFAILED},
};

/* Protocol lists used to drive share/unshare loops (PROTO_END terminated). */
zfs_share_proto_t nfs_only[] = {
	PROTO_NFS,
	PROTO_END
};

zfs_share_proto_t smb_only[] = {
	PROTO_SMB,
	PROTO_END
};
zfs_share_proto_t share_all_proto[] = {
	PROTO_NFS,
	PROTO_SMB,
	PROTO_END
};

/*
 * Search the sharetab for the given mountpoint and protocol, returning
 * a zfs_share_type_t value.  Returns SHARED_NOT_SHARED when the sharetab
 * stream is unavailable or no matching entry is found.
 */
static zfs_share_type_t
is_shared(libzfs_handle_t *hdl, const char *mountpoint, zfs_share_proto_t proto)
{
	char buf[MAXPATHLEN], *tab;
	char *ptr;

	if (hdl->libzfs_sharetab == NULL)
		return (SHARED_NOT_SHARED);

	/* rescan the cached sharetab stream from the top on every call */
	(void) fseek(hdl->libzfs_sharetab, 0, SEEK_SET);

	while (fgets(buf, sizeof (buf), hdl->libzfs_sharetab) != NULL) {

		/* the mountpoint is the first entry on each line */
		if ((tab = strchr(buf, '\t')) == NULL)
			continue;

		*tab = '\0';
		if (strcmp(buf, mountpoint) == 0) {
			/*
			 * the protocol field is the third field
			 * skip over second field
			 */
			ptr = ++tab;
			if ((tab = strchr(ptr, '\t')) == NULL)
				continue;
			ptr = ++tab;
			if ((tab = strchr(ptr, '\t')) == NULL)
				continue;
			*tab = '\0';
			if (strcmp(ptr,
			    proto_table[proto].p_name) == 0) {
				switch (proto) {
				case PROTO_NFS:
					return (SHARED_NFS);
				case PROTO_SMB:
					return (SHARED_SMB);
				default:
					return (0);
				}
			}
		}
	}

	return (SHARED_NOT_SHARED);
}

/*
 * Fast-path emptiness check for a directory on ZFS, using only stat(2).
 * Returns B_TRUE on any error so the caller falls through to a more
 * informative failure later.
 */
static boolean_t
dir_is_empty_stat(const char *dirname)
{
	struct stat st;

	/*
	 * We only want to return false if the given path is a non empty
	 * directory, all other errors are handled elsewhere.
	 */
	if (stat(dirname, &st) < 0 || !S_ISDIR(st.st_mode)) {
		return (B_TRUE);
	}

	/*
	 * An empty directory will still have two entries in it, one
	 * entry for each of "." and "..".
	 */
	if (st.st_size > 2) {
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Slow-path emptiness check: open the directory and scan for any entry
 * other than "." and "..".  Returns B_TRUE if the directory cannot be
 * opened (see dir_is_empty() for why that is deliberate).
 */
static boolean_t
dir_is_empty_readdir(const char *dirname)
{
	DIR *dirp;
	struct dirent64 *dp;
	int dirfd;

	/* O_CLOEXEC keeps the fd out of any concurrently fork/exec'd child */
	if ((dirfd = openat(AT_FDCWD, dirname,
	    O_RDONLY | O_NDELAY | O_LARGEFILE | O_CLOEXEC, 0)) < 0) {
		return (B_TRUE);
	}

	if ((dirp = fdopendir(dirfd)) == NULL) {
		(void) close(dirfd);
		return (B_TRUE);
	}

	while ((dp = readdir64(dirp)) != NULL) {

		if (strcmp(dp->d_name, ".") == 0 ||
		    strcmp(dp->d_name, "..") == 0)
			continue;

		(void) closedir(dirp);
		return (B_FALSE);
	}

	(void) closedir(dirp);
	return (B_TRUE);
}

/*
 * Returns true if the specified directory is empty. If we can't open the
 * directory at all, return true so that the mount can fail with a more
 * informative error message.
 */
static boolean_t
dir_is_empty(const char *dirname)
{
	struct statvfs64 st;

	/*
	 * If the statvfs call fails or the filesystem is not a ZFS
	 * filesystem, fall back to the slow path which uses readdir.
	 */
	if ((statvfs64(dirname, &st) != 0) ||
	    (strcmp(st.f_basetype, "zfs") != 0)) {
		return (dir_is_empty_readdir(dirname));
	}

	/*
	 * At this point, we know the provided path is on a ZFS
	 * filesystem, so we can use stat instead of readdir to
	 * determine if the directory is empty or not. We try to avoid
	 * using readdir because that requires opening "dirname"; this
	 * open file descriptor can potentially end up in a child
	 * process if there's a concurrent fork, thus preventing the
	 * zfs_mount() from otherwise succeeding (the open file
	 * descriptor inherited by the child process will cause the
	 * parent's mount to fail with EBUSY). The performance
	 * implications of replacing the open, read, and close with a
	 * single stat is nice; but is not the main motivation for the
	 * added complexity.
	 */
	return (dir_is_empty_stat(dirname));
}

/*
 * Checks to see if the mount is active. If the filesystem is mounted, we fill
 * in 'where' with the current mountpoint, and return 1. Otherwise, we return
 * 0.  The returned 'where' string is allocated and must be freed by the
 * caller.
 */
boolean_t
is_mounted(libzfs_handle_t *zfs_hdl, const char *special, char **where)
{
	struct mnttab entry;

	if (libzfs_mnttab_find(zfs_hdl, special, &entry) != 0)
		return (B_FALSE);

	if (where != NULL)
		*where = zfs_strdup(zfs_hdl, entry.mnt_mountp);

	return (B_TRUE);
}

/* Dataset-handle flavored wrapper around is_mounted(). */
boolean_t
zfs_is_mounted(zfs_handle_t *zhp, char **where)
{
	return (is_mounted(zhp->zfs_hdl, zfs_get_name(zhp), where));
}

/*
 * Returns true if the given dataset is mountable, false otherwise. Returns the
 * mountpoint in 'buf'.  A dataset is not mountable when the mountpoint
 * property is 'none' or 'legacy', when canmount=off, or when it is zoned and
 * we are in the global zone.
 */
static boolean_t
zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
    zprop_source_t *source)
{
	char sourceloc[MAXNAMELEN];
	zprop_source_t sourcetype;

	if (!zfs_prop_valid_for_type(ZFS_PROP_MOUNTPOINT, zhp->zfs_type))
		return (B_FALSE);

	verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, buflen,
	    &sourcetype, sourceloc, sizeof (sourceloc), B_FALSE) == 0);

	if (strcmp(buf, ZFS_MOUNTPOINT_NONE) == 0 ||
	    strcmp(buf, ZFS_MOUNTPOINT_LEGACY) == 0)
		return (B_FALSE);

	if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_OFF)
		return (B_FALSE);

	if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) &&
	    getzoneid() == GLOBAL_ZONEID)
		return (B_FALSE);

	if (source)
		*source = sourcetype;

	return (B_TRUE);
}

/*
 * Mount the given filesystem.
335 */ 336 int 337 zfs_mount(zfs_handle_t *zhp, const char *options, int flags) 338 { 339 struct stat buf; 340 char mountpoint[ZFS_MAXPROPLEN]; 341 char mntopts[MNT_LINE_MAX]; 342 libzfs_handle_t *hdl = zhp->zfs_hdl; 343 uint64_t keystatus; 344 int rc; 345 346 if (options == NULL) 347 mntopts[0] = '\0'; 348 else 349 (void) strlcpy(mntopts, options, sizeof (mntopts)); 350 351 /* 352 * If the pool is imported read-only then all mounts must be read-only 353 */ 354 if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL)) 355 flags |= MS_RDONLY; 356 357 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL)) 358 return (0); 359 360 /* 361 * If the filesystem is encrypted the key must be loaded in order to 362 * mount. If the key isn't loaded, the MS_CRYPT flag decides whether 363 * or not we attempt to load the keys. Note: we must call 364 * zfs_refresh_properties() here since some callers of this function 365 * (most notably zpool_enable_datasets()) may implicitly load our key 366 * by loading the parent's key first. 367 */ 368 if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) { 369 zfs_refresh_properties(zhp); 370 keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS); 371 372 /* 373 * If the key is unavailable and MS_CRYPT is set give the 374 * user a chance to enter the key. Otherwise just fail 375 * immediately. 
376 */ 377 if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) { 378 if (flags & MS_CRYPT) { 379 rc = zfs_crypto_load_key(zhp, B_FALSE, NULL); 380 if (rc != 0) 381 return (rc); 382 } else { 383 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 384 "encryption key not loaded")); 385 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED, 386 dgettext(TEXT_DOMAIN, "cannot mount '%s'"), 387 mountpoint)); 388 } 389 } 390 391 } 392 393 /* Create the directory if it doesn't already exist */ 394 if (lstat(mountpoint, &buf) != 0) { 395 if (mkdirp(mountpoint, 0755) != 0) { 396 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 397 "failed to create mountpoint")); 398 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED, 399 dgettext(TEXT_DOMAIN, "cannot mount '%s'"), 400 mountpoint)); 401 } 402 } 403 404 /* 405 * Determine if the mountpoint is empty. If so, refuse to perform the 406 * mount. We don't perform this check if MS_OVERLAY is specified, which 407 * would defeat the point. We also avoid this check if 'remount' is 408 * specified. 409 */ 410 if ((flags & MS_OVERLAY) == 0 && 411 strstr(mntopts, MNTOPT_REMOUNT) == NULL && 412 !dir_is_empty(mountpoint)) { 413 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 414 "directory is not empty")); 415 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED, 416 dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint)); 417 } 418 419 /* perform the mount */ 420 if (mount(zfs_get_name(zhp), mountpoint, MS_OPTIONSTR | flags, 421 MNTTYPE_ZFS, NULL, 0, mntopts, sizeof (mntopts)) != 0) { 422 /* 423 * Generic errors are nasty, but there are just way too many 424 * from mount(), and they're well-understood. We pick a few 425 * common ones to improve upon. 
426 */ 427 if (errno == EBUSY) { 428 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 429 "mountpoint or dataset is busy")); 430 } else if (errno == EPERM) { 431 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 432 "Insufficient privileges")); 433 } else if (errno == ENOTSUP) { 434 char buf[256]; 435 int spa_version; 436 437 VERIFY(zfs_spa_version(zhp, &spa_version) == 0); 438 (void) snprintf(buf, sizeof (buf), 439 dgettext(TEXT_DOMAIN, "Can't mount a version %lld " 440 "file system on a version %d pool. Pool must be" 441 " upgraded to mount this file system."), 442 (u_longlong_t)zfs_prop_get_int(zhp, 443 ZFS_PROP_VERSION), spa_version); 444 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, buf)); 445 } else { 446 zfs_error_aux(hdl, strerror(errno)); 447 } 448 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED, 449 dgettext(TEXT_DOMAIN, "cannot mount '%s'"), 450 zhp->zfs_name)); 451 } 452 453 /* add the mounted entry into our cache */ 454 libzfs_mnttab_add(hdl, zfs_get_name(zhp), mountpoint, 455 mntopts); 456 return (0); 457 } 458 459 /* 460 * Unmount a single filesystem. 461 */ 462 static int 463 unmount_one(libzfs_handle_t *hdl, const char *mountpoint, int flags) 464 { 465 if (umount2(mountpoint, flags) != 0) { 466 zfs_error_aux(hdl, strerror(errno)); 467 return (zfs_error_fmt(hdl, EZFS_UMOUNTFAILED, 468 dgettext(TEXT_DOMAIN, "cannot unmount '%s'"), 469 mountpoint)); 470 } 471 472 return (0); 473 } 474 475 /* 476 * Unmount the given filesystem. 477 */ 478 int 479 zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags) 480 { 481 libzfs_handle_t *hdl = zhp->zfs_hdl; 482 struct mnttab entry; 483 char *mntpt = NULL; 484 485 /* check to see if we need to unmount the filesystem */ 486 if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) && 487 libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0)) { 488 /* 489 * mountpoint may have come from a call to 490 * getmnt/getmntany if it isn't NULL. 
If it is NULL, 491 * we know it comes from libzfs_mnttab_find which can 492 * then get freed later. We strdup it to play it safe. 493 */ 494 if (mountpoint == NULL) 495 mntpt = zfs_strdup(hdl, entry.mnt_mountp); 496 else 497 mntpt = zfs_strdup(hdl, mountpoint); 498 499 /* 500 * Unshare and unmount the filesystem 501 */ 502 if (zfs_unshare_proto(zhp, mntpt, share_all_proto) != 0) 503 return (-1); 504 505 if (unmount_one(hdl, mntpt, flags) != 0) { 506 free(mntpt); 507 (void) zfs_shareall(zhp); 508 return (-1); 509 } 510 libzfs_mnttab_remove(hdl, zhp->zfs_name); 511 free(mntpt); 512 } 513 514 return (0); 515 } 516 517 /* 518 * Unmount this filesystem and any children inheriting the mountpoint property. 519 * To do this, just act like we're changing the mountpoint property, but don't 520 * remount the filesystems afterwards. 521 */ 522 int 523 zfs_unmountall(zfs_handle_t *zhp, int flags) 524 { 525 prop_changelist_t *clp; 526 int ret; 527 528 clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, 0, flags); 529 if (clp == NULL) 530 return (-1); 531 532 ret = changelist_prefix(clp); 533 changelist_free(clp); 534 535 return (ret); 536 } 537 538 boolean_t 539 zfs_is_shared(zfs_handle_t *zhp) 540 { 541 zfs_share_type_t rc = 0; 542 zfs_share_proto_t *curr_proto; 543 544 if (ZFS_IS_VOLUME(zhp)) 545 return (B_FALSE); 546 547 for (curr_proto = share_all_proto; *curr_proto != PROTO_END; 548 curr_proto++) 549 rc |= zfs_is_shared_proto(zhp, NULL, *curr_proto); 550 551 return (rc ? B_TRUE : B_FALSE); 552 } 553 554 int 555 zfs_share(zfs_handle_t *zhp) 556 { 557 assert(!ZFS_IS_VOLUME(zhp)); 558 return (zfs_share_proto(zhp, share_all_proto)); 559 } 560 561 int 562 zfs_unshare(zfs_handle_t *zhp) 563 { 564 assert(!ZFS_IS_VOLUME(zhp)); 565 return (zfs_unshareall(zhp)); 566 } 567 568 /* 569 * Check to see if the filesystem is currently shared. 
570 */ 571 zfs_share_type_t 572 zfs_is_shared_proto(zfs_handle_t *zhp, char **where, zfs_share_proto_t proto) 573 { 574 char *mountpoint; 575 zfs_share_type_t rc; 576 577 if (!zfs_is_mounted(zhp, &mountpoint)) 578 return (SHARED_NOT_SHARED); 579 580 if ((rc = is_shared(zhp->zfs_hdl, mountpoint, proto)) 581 != SHARED_NOT_SHARED) { 582 if (where != NULL) 583 *where = mountpoint; 584 else 585 free(mountpoint); 586 return (rc); 587 } else { 588 free(mountpoint); 589 return (SHARED_NOT_SHARED); 590 } 591 } 592 593 boolean_t 594 zfs_is_shared_nfs(zfs_handle_t *zhp, char **where) 595 { 596 return (zfs_is_shared_proto(zhp, where, 597 PROTO_NFS) != SHARED_NOT_SHARED); 598 } 599 600 boolean_t 601 zfs_is_shared_smb(zfs_handle_t *zhp, char **where) 602 { 603 return (zfs_is_shared_proto(zhp, where, 604 PROTO_SMB) != SHARED_NOT_SHARED); 605 } 606 607 /* 608 * Make sure things will work if libshare isn't installed by using 609 * wrapper functions that check to see that the pointers to functions 610 * initialized in _zfs_init_libshare() are actually present. 611 */ 612 613 static sa_handle_t (*_sa_init)(int); 614 static sa_handle_t (*_sa_init_arg)(int, void *); 615 static int (*_sa_service)(sa_handle_t); 616 static void (*_sa_fini)(sa_handle_t); 617 static sa_share_t (*_sa_find_share)(sa_handle_t, char *); 618 static int (*_sa_enable_share)(sa_share_t, char *); 619 static int (*_sa_disable_share)(sa_share_t, char *); 620 static char *(*_sa_errorstr)(int); 621 static int (*_sa_parse_legacy_options)(sa_group_t, char *, char *); 622 static boolean_t (*_sa_needs_refresh)(sa_handle_t *); 623 static libzfs_handle_t *(*_sa_get_zfs_handle)(sa_handle_t); 624 static int (*_sa_zfs_process_share)(sa_handle_t, sa_group_t, sa_share_t, 625 char *, char *, zprop_source_t, char *, char *, char *); 626 static void (*_sa_update_sharetab_ts)(sa_handle_t); 627 628 /* 629 * _zfs_init_libshare() 630 * 631 * Find the libshare.so.1 entry points that we use here and save the 632 * values to be used later. 
This is triggered by the runtime loader.
 * Make sure the correct ISA version is loaded.
 */

#pragma init(_zfs_init_libshare)
static void
_zfs_init_libshare(void)
{
	void *libshare;
	char path[MAXPATHLEN];
	char isa[MAXISALEN];

	/* On 64-bit, locate the matching ISA subdirectory of /usr/lib. */
#if defined(_LP64)
	if (sysinfo(SI_ARCHITECTURE_64, isa, MAXISALEN) == -1)
		isa[0] = '\0';
#else
	isa[0] = '\0';
#endif
	(void) snprintf(path, MAXPATHLEN,
	    "/usr/lib/%s/libshare.so.1", isa);

	if ((libshare = dlopen(path, RTLD_LAZY | RTLD_GLOBAL)) != NULL) {
		_sa_init = (sa_handle_t (*)(int))dlsym(libshare, "sa_init");
		_sa_init_arg = (sa_handle_t (*)(int, void *))dlsym(libshare,
		    "sa_init_arg");
		_sa_fini = (void (*)(sa_handle_t))dlsym(libshare, "sa_fini");
		_sa_service = (int (*)(sa_handle_t))dlsym(libshare,
		    "sa_service");
		_sa_find_share = (sa_share_t (*)(sa_handle_t, char *))
		    dlsym(libshare, "sa_find_share");
		_sa_enable_share = (int (*)(sa_share_t, char *))dlsym(libshare,
		    "sa_enable_share");
		_sa_disable_share = (int (*)(sa_share_t, char *))dlsym(libshare,
		    "sa_disable_share");
		_sa_errorstr = (char *(*)(int))dlsym(libshare, "sa_errorstr");
		_sa_parse_legacy_options = (int (*)(sa_group_t, char *, char *))
		    dlsym(libshare, "sa_parse_legacy_options");
		_sa_needs_refresh = (boolean_t (*)(sa_handle_t *))
		    dlsym(libshare, "sa_needs_refresh");
		_sa_get_zfs_handle = (libzfs_handle_t *(*)(sa_handle_t))
		    dlsym(libshare, "sa_get_zfs_handle");
		_sa_zfs_process_share = (int (*)(sa_handle_t, sa_group_t,
		    sa_share_t, char *, char *, zprop_source_t, char *,
		    char *, char *))dlsym(libshare, "sa_zfs_process_share");
		_sa_update_sharetab_ts = (void (*)(sa_handle_t))
		    dlsym(libshare, "sa_update_sharetab_ts");
		/*
		 * All-or-nothing: if any entry point is missing, clear every
		 * pointer so the wrappers below uniformly see "no libshare".
		 */
		if (_sa_init == NULL || _sa_init_arg == NULL ||
		    _sa_fini == NULL || _sa_find_share == NULL ||
		    _sa_enable_share == NULL || _sa_disable_share == NULL ||
		    _sa_errorstr == NULL || _sa_parse_legacy_options == NULL ||
		    _sa_needs_refresh == NULL || _sa_get_zfs_handle == NULL ||
		    _sa_zfs_process_share == NULL || _sa_service == NULL ||
		    _sa_update_sharetab_ts == NULL) {
			_sa_init = NULL;
			_sa_init_arg = NULL;
			_sa_service = NULL;
			_sa_fini = NULL;
			_sa_disable_share = NULL;
			_sa_enable_share = NULL;
			_sa_errorstr = NULL;
			_sa_parse_legacy_options = NULL;
			(void) dlclose(libshare);
			_sa_needs_refresh = NULL;
			_sa_get_zfs_handle = NULL;
			_sa_zfs_process_share = NULL;
			_sa_update_sharetab_ts = NULL;
		}
	}
}

/*
 * zfs_init_libshare(zhandle, service)
 *
 * Initialize the libshare API if it hasn't already been initialized.
 * In all cases it returns 0 if it succeeded and an error if not. The
 * service value is which part(s) of the API to initialize and is a
 * direct map to the libshare sa_init(service) interface.
 */
static int
zfs_init_libshare_impl(libzfs_handle_t *zhandle, int service, void *arg)
{
	/*
	 * libshare is either not installed or we're in a branded zone. The
	 * rest of the wrapper functions around the libshare calls already
	 * handle NULL function pointers, but we don't want the callers of
	 * zfs_init_libshare() to fail prematurely if libshare is not available.
	 */
	if (_sa_init == NULL)
		return (SA_OK);

	/*
	 * Attempt to refresh libshare. This is necessary if there was a cache
	 * miss for a new ZFS dataset that was just created, or if state of the
	 * sharetab file has changed since libshare was last initialized. We
	 * want to make sure so check timestamps to see if a different process
	 * has updated any of the configuration. If there was some non-ZFS
	 * change, we need to re-initialize the internal cache.
	 */
	if (_sa_needs_refresh != NULL &&
	    _sa_needs_refresh(zhandle->libzfs_sharehdl)) {
		zfs_uninit_libshare(zhandle);
		zhandle->libzfs_sharehdl = _sa_init_arg(service, arg);
	}

	/*
	 * NOTE(review): this NULL check on 'zhandle' comes after 'zhandle'
	 * has already been dereferenced above — it only guards this branch.
	 * Verify whether any caller can pass a NULL handle here.
	 */
	if (zhandle && zhandle->libzfs_sharehdl == NULL)
		zhandle->libzfs_sharehdl = _sa_init_arg(service, arg);

	if (zhandle->libzfs_sharehdl == NULL)
		return (SA_NO_MEMORY);

	return (SA_OK);
}

/* Public wrapper: initialize libshare with no caller-supplied argument. */
int
zfs_init_libshare(libzfs_handle_t *zhandle, int service)
{
	return (zfs_init_libshare_impl(zhandle, service, NULL));
}

/* Public wrapper: initialize libshare with a caller-supplied argument. */
int
zfs_init_libshare_arg(libzfs_handle_t *zhandle, int service, void *arg)
{
	return (zfs_init_libshare_impl(zhandle, service, arg));
}


/*
 * zfs_uninit_libshare(zhandle)
 *
 * Uninitialize the libshare API if it hasn't already been
 * uninitialized. It is OK to call multiple times.
 */
void
zfs_uninit_libshare(libzfs_handle_t *zhandle)
{
	if (zhandle != NULL && zhandle->libzfs_sharehdl != NULL) {
		if (_sa_fini != NULL)
			_sa_fini(zhandle->libzfs_sharehdl);
		zhandle->libzfs_sharehdl = NULL;
	}
}

/*
 * zfs_parse_options(options, proto)
 *
 * Call the legacy parse interface to get the protocol specific
 * options using the NULL arg to indicate that this is a "parse" only.
 * Returns SA_CONFIG_ERR when libshare is unavailable.
 */
int
zfs_parse_options(char *options, zfs_share_proto_t proto)
{
	if (_sa_parse_legacy_options != NULL) {
		return (_sa_parse_legacy_options(NULL, options,
		    proto_table[proto].p_name));
	}
	return (SA_CONFIG_ERR);
}

/*
 * zfs_sa_find_share(handle, path)
 *
 * wrapper around sa_find_share to find a share path in the
 * configuration.
794 */ 795 static sa_share_t 796 zfs_sa_find_share(sa_handle_t handle, char *path) 797 { 798 if (_sa_find_share != NULL) 799 return (_sa_find_share(handle, path)); 800 return (NULL); 801 } 802 803 /* 804 * zfs_sa_enable_share(share, proto) 805 * 806 * Wrapper for sa_enable_share which enables a share for a specified 807 * protocol. 808 */ 809 static int 810 zfs_sa_enable_share(sa_share_t share, char *proto) 811 { 812 if (_sa_enable_share != NULL) 813 return (_sa_enable_share(share, proto)); 814 return (SA_CONFIG_ERR); 815 } 816 817 /* 818 * zfs_sa_disable_share(share, proto) 819 * 820 * Wrapper for sa_enable_share which disables a share for a specified 821 * protocol. 822 */ 823 static int 824 zfs_sa_disable_share(sa_share_t share, char *proto) 825 { 826 if (_sa_disable_share != NULL) 827 return (_sa_disable_share(share, proto)); 828 return (SA_CONFIG_ERR); 829 } 830 831 /* 832 * Share the given filesystem according to the options in the specified 833 * protocol specific properties (sharenfs, sharesmb). We rely 834 * on "libshare" to the dirty work for us. 835 */ 836 static int 837 zfs_share_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto) 838 { 839 char mountpoint[ZFS_MAXPROPLEN]; 840 char shareopts[ZFS_MAXPROPLEN]; 841 char sourcestr[ZFS_MAXPROPLEN]; 842 libzfs_handle_t *hdl = zhp->zfs_hdl; 843 sa_share_t share; 844 zfs_share_proto_t *curr_proto; 845 zprop_source_t sourcetype; 846 int service = SA_INIT_ONE_SHARE_FROM_HANDLE; 847 int ret; 848 849 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL)) 850 return (0); 851 852 /* 853 * Function may be called in a loop from higher up stack, with libshare 854 * initialized for multiple shares (SA_INIT_SHARE_API_SELECTIVE). 855 * zfs_init_libshare_arg will refresh the handle's cache if necessary. 856 * In this case we do not want to switch to per share initialization. 857 * Specify SA_INIT_SHARE_API to do full refresh, if refresh required. 
858 */ 859 if ((hdl->libzfs_sharehdl != NULL) && (_sa_service != NULL) && 860 (_sa_service(hdl->libzfs_sharehdl) == 861 SA_INIT_SHARE_API_SELECTIVE)) { 862 service = SA_INIT_SHARE_API; 863 } 864 865 for (curr_proto = proto; *curr_proto != PROTO_END; curr_proto++) { 866 /* 867 * Return success if there are no share options. 868 */ 869 if (zfs_prop_get(zhp, proto_table[*curr_proto].p_prop, 870 shareopts, sizeof (shareopts), &sourcetype, sourcestr, 871 ZFS_MAXPROPLEN, B_FALSE) != 0 || 872 strcmp(shareopts, "off") == 0) 873 continue; 874 ret = zfs_init_libshare_arg(hdl, service, zhp); 875 if (ret != SA_OK) { 876 (void) zfs_error_fmt(hdl, EZFS_SHARENFSFAILED, 877 dgettext(TEXT_DOMAIN, "cannot share '%s': %s"), 878 zfs_get_name(zhp), _sa_errorstr != NULL ? 879 _sa_errorstr(ret) : ""); 880 return (-1); 881 } 882 883 /* 884 * If the 'zoned' property is set, then zfs_is_mountable() 885 * will have already bailed out if we are in the global zone. 886 * But local zones cannot be NFS servers, so we ignore it for 887 * local zones as well. 888 */ 889 if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) 890 continue; 891 892 share = zfs_sa_find_share(hdl->libzfs_sharehdl, mountpoint); 893 if (share == NULL) { 894 /* 895 * This may be a new file system that was just 896 * created so isn't in the internal cache 897 * (second time through). Rather than 898 * reloading the entire configuration, we can 899 * assume ZFS has done the checking and it is 900 * safe to add this to the internal 901 * configuration. 
902 */ 903 if (_sa_zfs_process_share(hdl->libzfs_sharehdl, 904 NULL, NULL, mountpoint, 905 proto_table[*curr_proto].p_name, sourcetype, 906 shareopts, sourcestr, zhp->zfs_name) != SA_OK) { 907 (void) zfs_error_fmt(hdl, 908 proto_table[*curr_proto].p_share_err, 909 dgettext(TEXT_DOMAIN, "cannot share '%s'"), 910 zfs_get_name(zhp)); 911 return (-1); 912 } 913 share = zfs_sa_find_share(hdl->libzfs_sharehdl, 914 mountpoint); 915 } 916 if (share != NULL) { 917 int err; 918 err = zfs_sa_enable_share(share, 919 proto_table[*curr_proto].p_name); 920 if (err != SA_OK) { 921 (void) zfs_error_fmt(hdl, 922 proto_table[*curr_proto].p_share_err, 923 dgettext(TEXT_DOMAIN, "cannot share '%s'"), 924 zfs_get_name(zhp)); 925 return (-1); 926 } 927 } else { 928 (void) zfs_error_fmt(hdl, 929 proto_table[*curr_proto].p_share_err, 930 dgettext(TEXT_DOMAIN, "cannot share '%s'"), 931 zfs_get_name(zhp)); 932 return (-1); 933 } 934 935 } 936 return (0); 937 } 938 939 940 int 941 zfs_share_nfs(zfs_handle_t *zhp) 942 { 943 return (zfs_share_proto(zhp, nfs_only)); 944 } 945 946 int 947 zfs_share_smb(zfs_handle_t *zhp) 948 { 949 return (zfs_share_proto(zhp, smb_only)); 950 } 951 952 int 953 zfs_shareall(zfs_handle_t *zhp) 954 { 955 return (zfs_share_proto(zhp, share_all_proto)); 956 } 957 958 /* 959 * Unshare a filesystem by mountpoint. 960 */ 961 static int 962 unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint, 963 zfs_share_proto_t proto) 964 { 965 sa_share_t share; 966 int err; 967 char *mntpt; 968 int service = SA_INIT_ONE_SHARE_FROM_NAME; 969 970 /* 971 * Mountpoint could get trashed if libshare calls getmntany 972 * which it does during API initialization, so strdup the 973 * value. 974 */ 975 mntpt = zfs_strdup(hdl, mountpoint); 976 977 /* 978 * Function may be called in a loop from higher up stack, with libshare 979 * initialized for multiple shares (SA_INIT_SHARE_API_SELECTIVE). 980 * zfs_init_libshare_arg will refresh the handle's cache if necessary. 
981 * In this case we do not want to switch to per share initialization. 982 * Specify SA_INIT_SHARE_API to do full refresh, if refresh required. 983 */ 984 if ((hdl->libzfs_sharehdl != NULL) && (_sa_service != NULL) && 985 (_sa_service(hdl->libzfs_sharehdl) == 986 SA_INIT_SHARE_API_SELECTIVE)) { 987 service = SA_INIT_SHARE_API; 988 } 989 990 err = zfs_init_libshare_arg(hdl, service, (void *)name); 991 if (err != SA_OK) { 992 free(mntpt); /* don't need the copy anymore */ 993 return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err, 994 dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"), 995 name, _sa_errorstr(err))); 996 } 997 998 share = zfs_sa_find_share(hdl->libzfs_sharehdl, mntpt); 999 free(mntpt); /* don't need the copy anymore */ 1000 1001 if (share != NULL) { 1002 err = zfs_sa_disable_share(share, proto_table[proto].p_name); 1003 if (err != SA_OK) { 1004 return (zfs_error_fmt(hdl, 1005 proto_table[proto].p_unshare_err, 1006 dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"), 1007 name, _sa_errorstr(err))); 1008 } 1009 } else { 1010 return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err, 1011 dgettext(TEXT_DOMAIN, "cannot unshare '%s': not found"), 1012 name)); 1013 } 1014 return (0); 1015 } 1016 1017 /* 1018 * Unshare the given filesystem. 
1019 */ 1020 int 1021 zfs_unshare_proto(zfs_handle_t *zhp, const char *mountpoint, 1022 zfs_share_proto_t *proto) 1023 { 1024 libzfs_handle_t *hdl = zhp->zfs_hdl; 1025 struct mnttab entry; 1026 char *mntpt = NULL; 1027 1028 /* check to see if need to unmount the filesystem */ 1029 rewind(zhp->zfs_hdl->libzfs_mnttab); 1030 if (mountpoint != NULL) 1031 mountpoint = mntpt = zfs_strdup(hdl, mountpoint); 1032 1033 if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) && 1034 libzfs_mnttab_find(hdl, zfs_get_name(zhp), &entry) == 0)) { 1035 zfs_share_proto_t *curr_proto; 1036 1037 if (mountpoint == NULL) 1038 mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp); 1039 1040 for (curr_proto = proto; *curr_proto != PROTO_END; 1041 curr_proto++) { 1042 1043 if (is_shared(hdl, mntpt, *curr_proto) && 1044 unshare_one(hdl, zhp->zfs_name, 1045 mntpt, *curr_proto) != 0) { 1046 if (mntpt != NULL) 1047 free(mntpt); 1048 return (-1); 1049 } 1050 } 1051 } 1052 if (mntpt != NULL) 1053 free(mntpt); 1054 1055 return (0); 1056 } 1057 1058 int 1059 zfs_unshare_nfs(zfs_handle_t *zhp, const char *mountpoint) 1060 { 1061 return (zfs_unshare_proto(zhp, mountpoint, nfs_only)); 1062 } 1063 1064 int 1065 zfs_unshare_smb(zfs_handle_t *zhp, const char *mountpoint) 1066 { 1067 return (zfs_unshare_proto(zhp, mountpoint, smb_only)); 1068 } 1069 1070 /* 1071 * Same as zfs_unmountall(), but for NFS and SMB unshares. 
1072 */ 1073 int 1074 zfs_unshareall_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto) 1075 { 1076 prop_changelist_t *clp; 1077 int ret; 1078 1079 clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0, 0); 1080 if (clp == NULL) 1081 return (-1); 1082 1083 ret = changelist_unshare(clp, proto); 1084 changelist_free(clp); 1085 1086 return (ret); 1087 } 1088 1089 int 1090 zfs_unshareall_nfs(zfs_handle_t *zhp) 1091 { 1092 return (zfs_unshareall_proto(zhp, nfs_only)); 1093 } 1094 1095 int 1096 zfs_unshareall_smb(zfs_handle_t *zhp) 1097 { 1098 return (zfs_unshareall_proto(zhp, smb_only)); 1099 } 1100 1101 int 1102 zfs_unshareall(zfs_handle_t *zhp) 1103 { 1104 return (zfs_unshareall_proto(zhp, share_all_proto)); 1105 } 1106 1107 int 1108 zfs_unshareall_bypath(zfs_handle_t *zhp, const char *mountpoint) 1109 { 1110 return (zfs_unshare_proto(zhp, mountpoint, share_all_proto)); 1111 } 1112 1113 /* 1114 * Remove the mountpoint associated with the current dataset, if necessary. 1115 * We only remove the underlying directory if: 1116 * 1117 * - The mountpoint is not 'none' or 'legacy' 1118 * - The mountpoint is non-empty 1119 * - The mountpoint is the default or inherited 1120 * - The 'zoned' property is set, or we're in a local zone 1121 * 1122 * Any other directories we leave alone. 1123 */ 1124 void 1125 remove_mountpoint(zfs_handle_t *zhp) 1126 { 1127 char mountpoint[ZFS_MAXPROPLEN]; 1128 zprop_source_t source; 1129 1130 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), 1131 &source)) 1132 return; 1133 1134 if (source == ZPROP_SRC_DEFAULT || 1135 source == ZPROP_SRC_INHERITED) { 1136 /* 1137 * Try to remove the directory, silently ignoring any errors. 1138 * The filesystem may have since been removed or moved around, 1139 * and this error isn't really useful to the administrator in 1140 * any way. 
 */
		(void) rmdir(mountpoint);
	}
}

/*
 * Add the given zfs handle to the cb_handles array, dynamically reallocating
 * the array if it is out of space.  The array doubles in size as needed,
 * starting at 64 slots.
 */
void
libzfs_add_handle(get_all_cb_t *cbp, zfs_handle_t *zhp)
{
	if (cbp->cb_alloc == cbp->cb_used) {
		size_t newsz;
		zfs_handle_t **newhandles;

		/* Double the array, or start with 64 slots. */
		newsz = cbp->cb_alloc != 0 ? cbp->cb_alloc * 2 : 64;
		/*
		 * NOTE(review): newhandles is not checked for NULL here;
		 * presumably zfs_realloc() reports failure through the
		 * libzfs handle, but a NULL return would be dereferenced
		 * below — confirm against zfs_realloc()'s contract.
		 */
		newhandles = zfs_realloc(zhp->zfs_hdl,
		    cbp->cb_handles, cbp->cb_alloc * sizeof (zfs_handle_t *),
		    newsz * sizeof (zfs_handle_t *));
		cbp->cb_handles = newhandles;
		cbp->cb_alloc = newsz;
	}
	cbp->cb_handles[cbp->cb_used++] = zhp;
}

/*
 * Recursive helper function used during file system enumeration.  Collects
 * mountable filesystems into the get_all_cb_t array, skipping datasets that
 * cannot or should not be auto-mounted.  Skipped handles are closed here;
 * collected handles are owned by the array.
 */
static int
zfs_iter_cb(zfs_handle_t *zhp, void *data)
{
	get_all_cb_t *cbp = data;

	/* Only filesystems are mountable; skip volumes, snapshots, etc. */
	if (!(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM)) {
		zfs_close(zhp);
		return (0);
	}

	/* canmount=noauto datasets are excluded from bulk mounting. */
	if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_NOAUTO) {
		zfs_close(zhp);
		return (0);
	}

	/* An encrypted dataset whose key is not loaded cannot be mounted. */
	if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
	    ZFS_KEYSTATUS_UNAVAILABLE) {
		zfs_close(zhp);
		return (0);
	}

	/*
	 * If this filesystem is inconsistent and has a receive resume
	 * token, we can not mount it.
	 */
	if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
	    zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
	    NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
		zfs_close(zhp);
		return (0);
	}

	/* Keep this dataset and recurse into its children. */
	libzfs_add_handle(cbp, zhp);
	if (zfs_iter_filesystems(zhp, zfs_iter_cb, cbp) != 0) {
		zfs_close(zhp);
		return (-1);
	}
	return (0);
}

/*
 * Sort comparator that compares two mountpoint paths. We sort these paths so
 * that subdirectories immediately follow their parents.
This means that we
 * effectively treat the '/' character as the lowest value non-nul char.
 * Since filesystems from non-global zones can have the same mountpoint
 * as other filesystems, the comparator sorts global zone filesystems to
 * the top of the list. This means that the global zone will traverse the
 * filesystem list in the correct order and can stop when it sees the
 * first zoned filesystem. In a non-global zone, only the delegated
 * filesystems are seen.
 *
 * An example sorted list using this comparator would look like:
 *
 * /foo
 * /foo/bar
 * /foo/bar/baz
 * /foo/baz
 * /foo.bar
 * /foo (NGZ1)
 * /foo (NGZ2)
 *
 * The mounting code depends on this ordering to deterministically iterate
 * over filesystems in order to spawn parallel mount tasks.
 */
static int
mountpoint_cmp(const void *arga, const void *argb)
{
	/* qsort() passes pointers to the array slots (zfs_handle_t *). */
	zfs_handle_t *const *zap = arga;
	zfs_handle_t *za = *zap;
	zfs_handle_t *const *zbp = argb;
	zfs_handle_t *zb = *zbp;
	char mounta[MAXPATHLEN];
	char mountb[MAXPATHLEN];
	const char *a = mounta;
	const char *b = mountb;
	boolean_t gota, gotb;
	uint64_t zoneda, zonedb;

	/* Zoned (non-global zone) filesystems always sort after others. */
	zoneda = zfs_prop_get_int(za, ZFS_PROP_ZONED);
	zonedb = zfs_prop_get_int(zb, ZFS_PROP_ZONED);
	if (zoneda && !zonedb)
		return (1);
	if (!zoneda && zonedb)
		return (-1);

	/* Only filesystems have a mountpoint we can compare. */
	gota = (zfs_get_type(za) == ZFS_TYPE_FILESYSTEM);
	if (gota) {
		verify(zfs_prop_get(za, ZFS_PROP_MOUNTPOINT, mounta,
		    sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
	}
	gotb = (zfs_get_type(zb) == ZFS_TYPE_FILESYSTEM);
	if (gotb) {
		verify(zfs_prop_get(zb, ZFS_PROP_MOUNTPOINT, mountb,
		    sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
	}

	if (gota && gotb) {
		/* Walk past the common prefix of both paths. */
		while (*a != '\0' && (*a == *b)) {
			a++;
			b++;
		}
		if (*a == *b)
			return (0);
		/* A strict prefix (parent path) sorts first. */
		if (*a == '\0')
			return (-1);
		if (*b == '\0')
			return (1);
		/* Treat '/' as the lowest non-nul char: children follow parents. */
		if (*a == '/')
			return (-1);
		if (*b == '/')
			return (1);
		return (*a < *b ? -1 : *a > *b);
	}

	/* A dataset with a real mountpoint sorts before one without. */
	if (gota)
		return (-1);
	if (gotb)
		return (1);

	/*
	 * If neither filesystem has a mountpoint, revert to sorting by
	 * dataset name.
	 */
	return (strcmp(zfs_get_name(za), zfs_get_name(zb)));
}

/*
 * Return true if path2 is a child of path1.  A child begins with the
 * parent path followed immediately by '/'.
 *
 * NOTE(review): if path1 is "/", this never matches (path2[1] would have
 * to be '/'), so a dataset mounted at the root appears to have no
 * descendants here — confirm this is intended for mountpoint=/.
 */
static boolean_t
libzfs_path_contains(const char *path1, const char *path2)
{
	return (strstr(path2, path1) == path2 && path2[strlen(path1)] == '/');
}

/*
 * Given a mountpoint specified by idx in the handles array, find the first
 * non-descendent of that mountpoint and return its index. Descendant paths
 * start with the parent's path. This function relies on the ordering
 * enforced by mountpoint_cmp().
 */
static int
non_descendant_idx(zfs_handle_t **handles, size_t num_handles, int idx)
{
	char parent[ZFS_MAXPROPLEN];
	char child[ZFS_MAXPROPLEN];
	int i;

	verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, parent,
	    sizeof (parent), NULL, NULL, 0, B_FALSE) == 0);

	/* Scan forward until the first entry that is not under 'parent'. */
	for (i = idx + 1; i < num_handles; i++) {
		verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT, child,
		    sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
		if (!libzfs_path_contains(parent, child))
			break;
	}
	return (i);
}

/* Per-task argument for zfs_mount_task(), dispatched via the mount taskq. */
typedef struct mnt_param {
	libzfs_handle_t	*mnt_hdl;
	zfs_taskq_t	*mnt_tq;
	zfs_handle_t	**mnt_zhps; /* filesystems to mount */
	size_t		mnt_num_handles;
	int		mnt_idx;	/* Index of selected entry to mount */
	zfs_iter_f	mnt_func;
	void		*mnt_data;
} mnt_param_t;

/*
 * Allocate and populate the parameter struct for mount function, and
 * schedule mounting of the entry selected by idx.
1343 */ 1344 static void 1345 zfs_dispatch_mount(libzfs_handle_t *hdl, zfs_handle_t **handles, 1346 size_t num_handles, int idx, zfs_iter_f func, void *data, zfs_taskq_t *tq) 1347 { 1348 mnt_param_t *mnt_param = zfs_alloc(hdl, sizeof (mnt_param_t)); 1349 1350 mnt_param->mnt_hdl = hdl; 1351 mnt_param->mnt_tq = tq; 1352 mnt_param->mnt_zhps = handles; 1353 mnt_param->mnt_num_handles = num_handles; 1354 mnt_param->mnt_idx = idx; 1355 mnt_param->mnt_func = func; 1356 mnt_param->mnt_data = data; 1357 1358 (void) zfs_taskq_dispatch(tq, zfs_mount_task, (void*)mnt_param, 1359 ZFS_TQ_SLEEP); 1360 } 1361 1362 /* 1363 * This is the structure used to keep state of mounting or sharing operations 1364 * during a call to zpool_enable_datasets(). 1365 */ 1366 typedef struct mount_state { 1367 /* 1368 * ms_mntstatus is set to -1 if any mount fails. While multiple threads 1369 * could update this variable concurrently, no synchronization is 1370 * needed as it's only ever set to -1. 1371 */ 1372 int ms_mntstatus; 1373 int ms_mntflags; 1374 const char *ms_mntopts; 1375 } mount_state_t; 1376 1377 static int 1378 zfs_mount_one(zfs_handle_t *zhp, void *arg) 1379 { 1380 mount_state_t *ms = arg; 1381 int ret = 0; 1382 1383 if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) == 1384 ZFS_KEYSTATUS_UNAVAILABLE) 1385 return (0); 1386 1387 if (zfs_mount(zhp, ms->ms_mntopts, ms->ms_mntflags) != 0) 1388 ret = ms->ms_mntstatus = -1; 1389 return (ret); 1390 } 1391 1392 static int 1393 zfs_share_one(zfs_handle_t *zhp, void *arg) 1394 { 1395 mount_state_t *ms = arg; 1396 int ret = 0; 1397 1398 if (zfs_share(zhp) != 0) 1399 ret = ms->ms_mntstatus = -1; 1400 return (ret); 1401 } 1402 1403 /* 1404 * Task queue function to mount one file system. On completion, it finds and 1405 * schedules its children to be mounted. This depends on the sorting done in 1406 * zfs_foreach_mountpoint(). 
Note that the degenerate case (chain of entries
 * each descending from the previous) will have no parallelism since we always
 * have to wait for the parent to finish mounting before we can schedule
 * its children.
 */
static void
zfs_mount_task(void *arg)
{
	mnt_param_t *mp = arg;
	int idx = mp->mnt_idx;
	zfs_handle_t **handles = mp->mnt_zhps;
	size_t num_handles = mp->mnt_num_handles;
	char mountpoint[ZFS_MAXPROPLEN];

	verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, mountpoint,
	    sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);

	/*
	 * If this mount fails, don't schedule any of its descendants, but
	 * still fall through to the cleanup below: the previous early
	 * return leaked the mnt_param_t allocated in zfs_dispatch_mount().
	 */
	if (mp->mnt_func(handles[idx], mp->mnt_data) != 0)
		goto out;

	/*
	 * We dispatch tasks to mount filesystems with mountpoints underneath
	 * this one. We do this by dispatching the next filesystem with a
	 * descendant mountpoint of the one we just mounted, then skip all of
	 * its descendants, dispatch the next descendant mountpoint, and so on.
	 * The non_descendant_idx() function skips over filesystems that are
	 * descendants of the filesystem we just dispatched.
	 */
	for (int i = idx + 1; i < num_handles;
	    i = non_descendant_idx(handles, num_handles, i)) {
		char child[ZFS_MAXPROPLEN];
		verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT,
		    child, sizeof (child), NULL, NULL, 0, B_FALSE) == 0);

		if (!libzfs_path_contains(mountpoint, child))
			break; /* not a descendant, return */
		zfs_dispatch_mount(mp->mnt_hdl, handles, num_handles, i,
		    mp->mnt_func, mp->mnt_data, mp->mnt_tq);
	}

out:
	free(mp);
}

/*
 * Issue the func callback for each ZFS handle contained in the handles
 * array. This function is used to mount all datasets, and so this function
 * guarantees that filesystems for parent mountpoints are called before their
 * children. As such, before issuing any callbacks, we first sort the array
 * of handles by mountpoint.
 *
 * Callbacks are issued in one of two ways:
 *
 * 1. Sequentially: If the parallel argument is B_FALSE or the ZFS_SERIAL_MOUNT
 *    environment variable is set, then we issue callbacks sequentially.
 *
 * 2. In parallel: If the parallel argument is B_TRUE and the ZFS_SERIAL_MOUNT
 *    environment variable is not set, then we use a taskq to dispatch threads
 *    to mount filesystems in parallel. This function dispatches tasks to mount
 *    the filesystems at the top-level mountpoints, and these tasks in turn
 *    are responsible for recursively mounting filesystems in their children
 *    mountpoints.
 */
void
zfs_foreach_mountpoint(libzfs_handle_t *hdl, zfs_handle_t **handles,
    size_t num_handles, zfs_iter_f func, void *data, boolean_t parallel)
{
	zoneid_t zoneid = getzoneid();

	/*
	 * The ZFS_SERIAL_MOUNT environment variable is an undocumented
	 * variable that can be used as a convenience to do a/b comparison
	 * of serial vs. parallel mounting.
	 */
	boolean_t serial_mount = !parallel ||
	    (getenv("ZFS_SERIAL_MOUNT") != NULL);

	/*
	 * Sort the datasets by mountpoint. See mountpoint_cmp for details
	 * of how these are sorted.
	 */
	qsort(handles, num_handles, sizeof (zfs_handle_t *), mountpoint_cmp);

	/* Serial path: walk the sorted array in order, one at a time. */
	if (serial_mount) {
		for (int i = 0; i < num_handles; i++) {
			func(handles[i], data);
		}
		return;
	}

	/*
	 * Issue the callback function for each dataset using a parallel
	 * algorithm that uses a taskq to manage threads.
	 */
	zfs_taskq_t *tq = zfs_taskq_create("mount_taskq", mount_tq_nthr, 0,
	    mount_tq_nthr, mount_tq_nthr, ZFS_TASKQ_PREPOPULATE);

	/*
	 * There may be multiple "top level" mountpoints outside of the pool's
	 * root mountpoint, e.g.: /foo /bar. Dispatch a mount task for each of
	 * these.
1505 */ 1506 for (int i = 0; i < num_handles; 1507 i = non_descendant_idx(handles, num_handles, i)) { 1508 /* 1509 * Since the mountpoints have been sorted so that the zoned 1510 * filesystems are at the end, a zoned filesystem seen from 1511 * the global zone means that we're done. 1512 */ 1513 if (zoneid == GLOBAL_ZONEID && 1514 zfs_prop_get_int(handles[i], ZFS_PROP_ZONED)) 1515 break; 1516 zfs_dispatch_mount(hdl, handles, num_handles, i, func, data, 1517 tq); 1518 } 1519 1520 zfs_taskq_wait(tq); /* wait for all scheduled mounts to complete */ 1521 zfs_taskq_destroy(tq); 1522 } 1523 1524 /* 1525 * Mount and share all datasets within the given pool. This assumes that no 1526 * datasets within the pool are currently mounted. 1527 */ 1528 #pragma weak zpool_mount_datasets = zpool_enable_datasets 1529 int 1530 zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags) 1531 { 1532 get_all_cb_t cb = { 0 }; 1533 mount_state_t ms = { 0 }; 1534 zfs_handle_t *zfsp; 1535 sa_init_selective_arg_t sharearg; 1536 int ret = 0; 1537 1538 if ((zfsp = zfs_open(zhp->zpool_hdl, zhp->zpool_name, 1539 ZFS_TYPE_DATASET)) == NULL) 1540 goto out; 1541 1542 1543 /* 1544 * Gather all non-snapshot datasets within the pool. Start by adding 1545 * the root filesystem for this pool to the list, and then iterate 1546 * over all child filesystems. 1547 */ 1548 libzfs_add_handle(&cb, zfsp); 1549 if (zfs_iter_filesystems(zfsp, zfs_iter_cb, &cb) != 0) 1550 goto out; 1551 1552 ms.ms_mntopts = mntopts; 1553 ms.ms_mntflags = flags; 1554 zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used, 1555 zfs_mount_one, &ms, B_TRUE); 1556 if (ms.ms_mntstatus != 0) 1557 ret = ms.ms_mntstatus; 1558 1559 /* 1560 * Initialize libshare SA_INIT_SHARE_API_SELECTIVE here 1561 * to avoid unnecessary load/unload of the libshare API 1562 * per shared dataset downstream. 
1563 */ 1564 sharearg.zhandle_arr = cb.cb_handles; 1565 sharearg.zhandle_len = cb.cb_used; 1566 if ((ret = zfs_init_libshare_arg(zhp->zpool_hdl, 1567 SA_INIT_SHARE_API_SELECTIVE, &sharearg)) != 0) 1568 goto out; 1569 1570 ms.ms_mntstatus = 0; 1571 zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used, 1572 zfs_share_one, &ms, B_FALSE); 1573 if (ms.ms_mntstatus != 0) 1574 ret = ms.ms_mntstatus; 1575 1576 out: 1577 for (int i = 0; i < cb.cb_used; i++) 1578 zfs_close(cb.cb_handles[i]); 1579 free(cb.cb_handles); 1580 1581 return (ret); 1582 } 1583 1584 static int 1585 mountpoint_compare(const void *a, const void *b) 1586 { 1587 const char *mounta = *((char **)a); 1588 const char *mountb = *((char **)b); 1589 1590 return (strcmp(mountb, mounta)); 1591 } 1592 1593 /* alias for 2002/240 */ 1594 #pragma weak zpool_unmount_datasets = zpool_disable_datasets 1595 /* 1596 * Unshare and unmount all datasets within the given pool. We don't want to 1597 * rely on traversing the DSL to discover the filesystems within the pool, 1598 * because this may be expensive (if not all of them are mounted), and can fail 1599 * arbitrarily (on I/O error, for example). Instead, we walk /etc/mnttab and 1600 * gather all the filesystems that are currently mounted. 1601 */ 1602 int 1603 zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force) 1604 { 1605 int used, alloc; 1606 struct mnttab entry; 1607 size_t namelen; 1608 char **mountpoints = NULL; 1609 zfs_handle_t **datasets = NULL; 1610 libzfs_handle_t *hdl = zhp->zpool_hdl; 1611 int i; 1612 int ret = -1; 1613 int flags = (force ? MS_FORCE : 0); 1614 sa_init_selective_arg_t sharearg; 1615 1616 namelen = strlen(zhp->zpool_name); 1617 1618 rewind(hdl->libzfs_mnttab); 1619 used = alloc = 0; 1620 while (getmntent(hdl->libzfs_mnttab, &entry) == 0) { 1621 /* 1622 * Ignore non-ZFS entries. 
 */
		if (entry.mnt_fstype == NULL ||
		    strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
			continue;

		/*
		 * Ignore filesystems not within this pool.  mnt_special holds
		 * the dataset name: it must equal the pool name or be a child
		 * of it ("pool/...").
		 */
		if (entry.mnt_mountp == NULL ||
		    strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 ||
		    (entry.mnt_special[namelen] != '/' &&
		    entry.mnt_special[namelen] != '\0'))
			continue;

		/*
		 * At this point we've found a filesystem within our pool. Add
		 * it to our growing list.  Both arrays grow in lockstep:
		 * start at 8 slots, then double when full.
		 */
		if (used == alloc) {
			if (alloc == 0) {
				if ((mountpoints = zfs_alloc(hdl,
				    8 * sizeof (void *))) == NULL)
					goto out;

				if ((datasets = zfs_alloc(hdl,
				    8 * sizeof (void *))) == NULL)
					goto out;

				alloc = 8;
			} else {
				void *ptr;

				if ((ptr = zfs_realloc(hdl, mountpoints,
				    alloc * sizeof (void *),
				    alloc * 2 * sizeof (void *))) == NULL)
					goto out;
				mountpoints = ptr;

				if ((ptr = zfs_realloc(hdl, datasets,
				    alloc * sizeof (void *),
				    alloc * 2 * sizeof (void *))) == NULL)
					goto out;
				datasets = ptr;

				alloc *= 2;
			}
		}

		if ((mountpoints[used] = zfs_strdup(hdl,
		    entry.mnt_mountp)) == NULL)
			goto out;

		/*
		 * This is allowed to fail, in case there is some I/O error. It
		 * is only used to determine if we need to remove the underlying
		 * mountpoint, so failure is not fatal.
		 */
		datasets[used] = make_dataset_handle(hdl, entry.mnt_special);

		used++;
	}

	/*
	 * At this point, we have the entire list of filesystems, so sort it by
	 * mountpoint.  libshare is initialized first with the selective list
	 * of datasets so the unshare pass below doesn't reload it per share.
	 */
	sharearg.zhandle_arr = datasets;
	sharearg.zhandle_len = used;
	ret = zfs_init_libshare_arg(hdl, SA_INIT_SHARE_API_SELECTIVE,
	    &sharearg);
	if (ret != 0)
		goto out;
	/* Reverse-lexical order: children are processed before parents. */
	qsort(mountpoints, used, sizeof (char *), mountpoint_compare);

	/*
	 * Walk through and first unshare everything.
 */
	for (i = 0; i < used; i++) {
		zfs_share_proto_t *curr_proto;
		for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
		    curr_proto++) {
			if (is_shared(hdl, mountpoints[i], *curr_proto) &&
			    unshare_one(hdl, mountpoints[i],
			    mountpoints[i], *curr_proto) != 0)
				goto out;
		}
	}

	/*
	 * Now unmount everything, removing the underlying directories as
	 * appropriate.  The reverse-sorted order guarantees children are
	 * unmounted before their parents.
	 */
	for (i = 0; i < used; i++) {
		if (unmount_one(hdl, mountpoints[i], flags) != 0)
			goto out;
	}

	/* datasets[i] may be NULL if make_dataset_handle() failed above. */
	for (i = 0; i < used; i++) {
		if (datasets[i])
			remove_mountpoint(datasets[i]);
	}

	ret = 0;
out:
	/* Release everything gathered, whether or not we succeeded. */
	for (i = 0; i < used; i++) {
		if (datasets[i])
			zfs_close(datasets[i]);
		free(mountpoints[i]);
	}
	free(datasets);
	free(mountpoints);

	return (ret);
}