/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021 by Delphix. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 * Copyright 2017 RackTop Systems.
 * Copyright (c) 2018 Datto Inc.
 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
 */

/*
 * Routines to manage ZFS mounts.  We separate all the nasty routines that have
 * to deal with the OS.  The following functions are the main entry points --
 * they are used by mount and unmount and when changing a filesystem's
 * mountpoint.
 *
 *	zfs_is_mounted()
 *	zfs_mount()
 *	zfs_mount_at()
 *	zfs_unmount()
 *	zfs_unmountall()
 *
 * This file also contains the functions used to manage sharing filesystems:
 *
 *	zfs_is_shared()
 *	zfs_share()
 *	zfs_unshare()
 *	zfs_unshareall()
 *	zfs_commit_shares()
 *
 * The following functions are available for pool consumers, and will
 * mount/unmount and share/unshare all datasets within pool:
 *
 *	zpool_enable_datasets()
 *	zpool_disable_datasets()
 */

#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <zone.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/dsl_crypt.h>

#include <libzfs.h>

#include "libzfs_impl.h"
#include <thread_pool.h>

#include <libshare.h>
#include <sys/systeminfo.h>
#define	MAXISALEN	257	/* based on sysinfo(2) man page */

static int mount_tp_nthr = 512;	/* tpool threads for multi-threaded mounting */

static void zfs_mount_task(void *);

static const proto_table_t proto_table[SA_PROTOCOL_COUNT] = {
	[SA_PROTOCOL_NFS] =
	    {ZFS_PROP_SHARENFS, EZFS_SHARENFSFAILED, EZFS_UNSHARENFSFAILED},
	[SA_PROTOCOL_SMB] =
	    {ZFS_PROP_SHARESMB, EZFS_SHARESMBFAILED, EZFS_UNSHARESMBFAILED},
};

static const enum sa_protocol share_all_proto[SA_PROTOCOL_COUNT + 1] = {
	SA_PROTOCOL_NFS,
	SA_PROTOCOL_SMB,
	SA_NO_PROTOCOL
};


static boolean_t
dir_is_empty_stat(const char *dirname)
{
	struct stat st;

	/*
	 * We only want to return false if the given path is a non-empty
	 * directory; all other errors are handled elsewhere.
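	 * Reporting "empty" on a stat() failure or on a non-directory lets
	 * the mount attempt proceed and surface a more useful error than we
	 * could produce here.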
	 */
	if (stat(dirname, &st) < 0 || !S_ISDIR(st.st_mode)) {
		return (B_TRUE);
	}

	/*
	 * An empty directory will still have two entries in it, one
	 * entry for each of "." and "..".
	 */
	if (st.st_size > 2) {
		return (B_FALSE);
	}

	return (B_TRUE);
}

static boolean_t
dir_is_empty_readdir(const char *dirname)
{
	DIR *dirp;
	struct dirent64 *dp;
	int dirfd;

	if ((dirfd = openat(AT_FDCWD, dirname,
	    O_RDONLY | O_NDELAY | O_LARGEFILE | O_CLOEXEC, 0)) < 0) {
		return (B_TRUE);
	}

	if ((dirp = fdopendir(dirfd)) == NULL) {
		(void) close(dirfd);
		return (B_TRUE);
	}

	while ((dp = readdir64(dirp)) != NULL) {

		if (strcmp(dp->d_name, ".") == 0 ||
		    strcmp(dp->d_name, "..") == 0)
			continue;

		(void) closedir(dirp);
		return (B_FALSE);
	}

	(void) closedir(dirp);
	return (B_TRUE);
}

/*
 * Returns true if the specified directory is empty.  If we can't open the
 * directory at all, return true so that the mount can fail with a more
 * informative error message.
 */
static boolean_t
dir_is_empty(const char *dirname)
{
	struct statfs64 st;

	/*
	 * If the statfs64 call fails or the filesystem is not a ZFS
	 * filesystem, fall back to the slow path which uses readdir.
	 */
	if ((statfs64(dirname, &st) != 0) ||
	    (st.f_type != ZFS_SUPER_MAGIC)) {
		return (dir_is_empty_readdir(dirname));
	}

	/*
	 * At this point, we know the provided path is on a ZFS
	 * filesystem, so we can use stat instead of readdir to
	 * determine if the directory is empty or not.  We try to avoid
	 * using readdir because that requires opening "dirname"; this
	 * open file descriptor can potentially end up in a child
	 * process if there's a concurrent fork, thus preventing the
	 * zfs_mount() from otherwise succeeding (the open file
	 * descriptor inherited by the child process will cause the
	 * parent's mount to fail with EBUSY).  The performance
	 * implications of replacing the open, read, and close with a
	 * single stat is nice; but is not the main motivation for the
	 * added complexity.
	 */
	return (dir_is_empty_stat(dirname));
}

/*
 * Checks to see if the mount is active.  If the filesystem is mounted, we fill
 * in 'where' with the current mountpoint, and return B_TRUE.  Otherwise, we
 * return B_FALSE.
 */
boolean_t
is_mounted(libzfs_handle_t *zfs_hdl, const char *special, char **where)
{
	struct mnttab entry;

	if (libzfs_mnttab_find(zfs_hdl, special, &entry) != 0)
		return (B_FALSE);

	if (where != NULL)
		*where = zfs_strdup(zfs_hdl, entry.mnt_mountp);

	return (B_TRUE);
}

boolean_t
zfs_is_mounted(zfs_handle_t *zhp, char **where)
{
	return (is_mounted(zhp->zfs_hdl, zfs_get_name(zhp), where));
}

/*
 * Checks any higher-order concerns about whether the given dataset is
 * mountable.  zfs_is_mountable_internal specifically assumes that the caller
 * has already verified the sanity of mounting the dataset at its mountpoint
 * to the extent the caller wants.
 */
static boolean_t
zfs_is_mountable_internal(zfs_handle_t *zhp)
{
	if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) &&
	    getzoneid() == GLOBAL_ZONEID)
		return (B_FALSE);

	return (B_TRUE);
}

/*
 * Returns true if the given dataset is mountable, false otherwise.  Returns
 * the mountpoint in 'buf'.
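 * A dataset is considered mountable when its mountpoint property is neither
 * "none" nor "legacy", canmount is not "off", the zoned/zone checks pass,
 * and, unless MS_FORCE is given, the dataset is not redacted.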
 */
static boolean_t
zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
    zprop_source_t *source, int flags)
{
	char sourceloc[MAXNAMELEN];
	zprop_source_t sourcetype;

	if (!zfs_prop_valid_for_type(ZFS_PROP_MOUNTPOINT, zhp->zfs_type,
	    B_FALSE))
		return (B_FALSE);

	verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, buflen,
	    &sourcetype, sourceloc, sizeof (sourceloc), B_FALSE) == 0);

	if (strcmp(buf, ZFS_MOUNTPOINT_NONE) == 0 ||
	    strcmp(buf, ZFS_MOUNTPOINT_LEGACY) == 0)
		return (B_FALSE);

	if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_OFF)
		return (B_FALSE);

	if (!zfs_is_mountable_internal(zhp))
		return (B_FALSE);

	if (zfs_prop_get_int(zhp, ZFS_PROP_REDACTED) && !(flags & MS_FORCE))
		return (B_FALSE);

	if (source)
		*source = sourcetype;

	return (B_TRUE);
}

/*
 * The filesystem is mounted by invoking the system mount utility rather
 * than by the system call mount(2).  This ensures that the /etc/mtab
 * file is correctly locked for the update.  Performing our own locking
 * and /etc/mtab update requires making an unsafe assumption about how
 * the mount utility performs its locking.  Unfortunately, this also means
 * in the case of a mount failure we do not have the exact errno.  We must
 * make do with the return value from the mount process.
 *
 * In the long term a shared library called libmount is under development
 * which provides a common API to address the locking and errno issues.
 * Once the standard mount utility has been updated to use this library
 * we can add an autoconf check to conditionally use it.
 *
 * http://www.kernel.org/pub/linux/utils/util-linux/libmount-docs/index.html
 */

static int
zfs_add_option(zfs_handle_t *zhp, char *options, int len,
    zfs_prop_t prop, const char *on, const char *off)
{
	char *source;
	uint64_t value;

	/* Skip adding duplicate default options */
	if ((strstr(options, on) != NULL) || (strstr(options, off) != NULL))
		return (0);

	/*
	 * zfs_prop_get_int() is not used to ensure our mount options
	 * are not influenced by the current /proc/self/mounts contents.
	 */
	value = getprop_uint64(zhp, prop, &source);

	(void) strlcat(options, ",", len);
	(void) strlcat(options, value ? on : off, len);

	return (0);
}

static int
zfs_add_options(zfs_handle_t *zhp, char *options, int len)
{
	int error = 0;

	error = zfs_add_option(zhp, options, len,
	    ZFS_PROP_ATIME, MNTOPT_ATIME, MNTOPT_NOATIME);
	/*
	 * don't add relatime/strictatime when atime=off, otherwise strictatime
	 * will force atime=on
	 */
	if (strstr(options, MNTOPT_NOATIME) == NULL) {
		error = zfs_add_option(zhp, options, len,
		    ZFS_PROP_RELATIME, MNTOPT_RELATIME, MNTOPT_STRICTATIME);
	}
	error = error ? error : zfs_add_option(zhp, options, len,
	    ZFS_PROP_DEVICES, MNTOPT_DEVICES, MNTOPT_NODEVICES);
	error = error ? error : zfs_add_option(zhp, options, len,
	    ZFS_PROP_EXEC, MNTOPT_EXEC, MNTOPT_NOEXEC);
	error = error ? error : zfs_add_option(zhp, options, len,
	    ZFS_PROP_READONLY, MNTOPT_RO, MNTOPT_RW);
	error = error ? error : zfs_add_option(zhp, options, len,
	    ZFS_PROP_SETUID, MNTOPT_SETUID, MNTOPT_NOSETUID);
	error = error ? error : zfs_add_option(zhp, options, len,
	    ZFS_PROP_NBMAND, MNTOPT_NBMAND, MNTOPT_NONBMAND);

	return (error);
}

int
zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
{
	char mountpoint[ZFS_MAXPROPLEN];

	if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL,
	    flags))
		return (0);

	return (zfs_mount_at(zhp, options, flags, mountpoint));
}

/*
 * Mount the given filesystem.
 */
int
zfs_mount_at(zfs_handle_t *zhp, const char *options, int flags,
    const char *mountpoint)
{
	struct stat buf;
	char mntopts[MNT_LINE_MAX];
	char overlay[ZFS_MAXPROPLEN];
	char prop_encroot[MAXNAMELEN];
	boolean_t is_encroot;
	zfs_handle_t *encroot_hp = zhp;
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	uint64_t keystatus;
	int remount = 0, rc;

	if (options == NULL) {
		(void) strlcpy(mntopts, MNTOPT_DEFAULTS, sizeof (mntopts));
	} else {
		(void) strlcpy(mntopts, options, sizeof (mntopts));
	}

	if (strstr(mntopts, MNTOPT_REMOUNT) != NULL)
		remount = 1;

	/* Potentially duplicates some checks if invoked by zfs_mount(). */
	if (!zfs_is_mountable_internal(zhp))
		return (0);

	/*
	 * If the pool is imported read-only then all mounts must be read-only
	 */
	if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL))
		(void) strlcat(mntopts, "," MNTOPT_RO, sizeof (mntopts));

	/*
	 * Append default mount options which apply to the mount point.
	 * This is done because under Linux (unlike Solaris) multiple mount
	 * points may reference a single super block.  This means that just
	 * given a super block there is no back reference to update the per
	 * mount point options.
	 */
	rc = zfs_add_options(zhp, mntopts, sizeof (mntopts));
	if (rc) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "default options unavailable"));
		return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
		    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
		    mountpoint));
	}

	/*
	 * If the filesystem is encrypted the key must be loaded in order to
	 * mount.  If the key isn't loaded, the MS_CRYPT flag decides whether
	 * or not we attempt to load the keys.  Note: we must call
	 * zfs_refresh_properties() here since some callers of this function
	 * (most notably zpool_enable_datasets()) may implicitly load our key
	 * by loading the parent's key first.
	 */
	if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
		zfs_refresh_properties(zhp);
		keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);

		/*
		 * If the key is unavailable and MS_CRYPT is set give the
		 * user a chance to enter the key.  Otherwise just fail
		 * immediately.
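		 * Keys are loaded against the encryption root, which may be
		 * an ancestor of this dataset, so we may need to open a
		 * separate handle for it below.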
		 */
		if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
			if (flags & MS_CRYPT) {
				rc = zfs_crypto_get_encryption_root(zhp,
				    &is_encroot, prop_encroot);
				if (rc) {
					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
					    "Failed to get encryption root for "
					    "'%s'."), zfs_get_name(zhp));
					return (rc);
				}

				if (!is_encroot) {
					encroot_hp = zfs_open(hdl, prop_encroot,
					    ZFS_TYPE_DATASET);
					if (encroot_hp == NULL)
						return (hdl->libzfs_error);
				}

				rc = zfs_crypto_load_key(encroot_hp,
				    B_FALSE, NULL);

				if (!is_encroot)
					zfs_close(encroot_hp);
				if (rc)
					return (rc);
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "encryption key not loaded"));
				return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
				    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
				    mountpoint));
			}
		}

	}

	/*
	 * Append zfsutil option so the mount helper allows the mount
	 */
	strlcat(mntopts, "," MNTOPT_ZFSUTIL, sizeof (mntopts));

	/* Create the directory if it doesn't already exist */
	if (lstat(mountpoint, &buf) != 0) {
		if (mkdirp(mountpoint, 0755) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "failed to create mountpoint: %s"),
			    strerror(errno));
			return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
			    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
			    mountpoint));
		}
	}

	/*
	 * Overlay mounts are enabled by default but may be disabled
	 * via the 'overlay' property.  The -O flag remains for compatibility.
	 */
	if (!(flags & MS_OVERLAY)) {
		if (zfs_prop_get(zhp, ZFS_PROP_OVERLAY, overlay,
		    sizeof (overlay), NULL, NULL, 0, B_FALSE) == 0) {
			if (strcmp(overlay, "on") == 0) {
				flags |= MS_OVERLAY;
			}
		}
	}

	/*
	 * Determine if the mountpoint is empty.  If it is not, refuse to
	 * perform the mount.  We don't perform this check if 'remount' is
	 * specified or if the overlay option (-O) is given.
	 */
	if ((flags & MS_OVERLAY) == 0 && !remount &&
	    !dir_is_empty(mountpoint)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "directory is not empty"));
		return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
		    dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint));
	}

	/* perform the mount */
	rc = do_mount(zhp, mountpoint, mntopts, flags);
	if (rc) {
		/*
		 * Generic errors are nasty, but there are just way too many
		 * from mount(), and they're well-understood.  We pick a few
		 * common ones to improve upon.
		 */
		if (rc == EBUSY) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "mountpoint or dataset is busy"));
		} else if (rc == EPERM) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Insufficient privileges"));
		} else if (rc == ENOTSUP) {
			int spa_version;

			VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Can't mount a version %llu "
			    "file system on a version %d pool.  Pool must be"
			    " upgraded to mount this file system."),
			    (u_longlong_t)zfs_prop_get_int(zhp,
			    ZFS_PROP_VERSION), spa_version);
		} else {
			zfs_error_aux(hdl, "%s", strerror(rc));
		}
		return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
		    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
		    zhp->zfs_name));
	}

	/* remove the mounted entry before re-adding on remount */
	if (remount)
		libzfs_mnttab_remove(hdl, zhp->zfs_name);

	/* add the mounted entry into our cache */
	libzfs_mnttab_add(hdl, zfs_get_name(zhp), mountpoint, mntopts);
	return (0);
}

/*
 * Unmount a single filesystem.
 */
static int
unmount_one(zfs_handle_t *zhp, const char *mountpoint, int flags)
{
	int error;

	error = do_unmount(zhp, mountpoint, flags);
	if (error != 0) {
		int libzfs_err;

		switch (error) {
		case EBUSY:
			libzfs_err = EZFS_BUSY;
			break;
		case EIO:
			libzfs_err = EZFS_IO;
			break;
		case ENOENT:
			libzfs_err = EZFS_NOENT;
			break;
		case ENOMEM:
			libzfs_err = EZFS_NOMEM;
			break;
		case EPERM:
			libzfs_err = EZFS_PERM;
			break;
		default:
			libzfs_err = EZFS_UMOUNTFAILED;
		}
		if (zhp) {
			return (zfs_error_fmt(zhp->zfs_hdl, libzfs_err,
			    dgettext(TEXT_DOMAIN, "cannot unmount '%s'"),
			    mountpoint));
		} else {
			return (-1);
		}
	}

	return (0);
}

/*
 * Unmount the given filesystem.
 */
int
zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
{
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	struct mnttab entry;
	char *mntpt = NULL;
	boolean_t encroot, unmounted = B_FALSE;

	/* check to see if we need to unmount the filesystem */
	if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
	    libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0)) {
		/*
		 * mountpoint may have come from a call to
		 * getmnt/getmntany if it isn't NULL.  If it is NULL,
		 * we know it comes from libzfs_mnttab_find which can
		 * then get freed later.  We strdup it to play it safe.
		 */
		if (mountpoint == NULL)
			mntpt = zfs_strdup(hdl, entry.mnt_mountp);
		else
			mntpt = zfs_strdup(hdl, mountpoint);

		/*
		 * Unshare and unmount the filesystem
		 */
		if (zfs_unshare(zhp, mntpt, share_all_proto) != 0) {
			free(mntpt);
			return (-1);
		}
		zfs_commit_shares(NULL);

		if (unmount_one(zhp, mntpt, flags) != 0) {
			free(mntpt);
			(void) zfs_share(zhp, NULL);
			zfs_commit_shares(NULL);
			return (-1);
		}

		libzfs_mnttab_remove(hdl, zhp->zfs_name);
		free(mntpt);
		unmounted = B_TRUE;
	}

	/*
	 * If the MS_CRYPT flag is provided we must ensure we attempt to
	 * unload the dataset's key regardless of whether we did any work
	 * to unmount it.  We only do this for encryption roots.
	 */
	if ((flags & MS_CRYPT) != 0 &&
	    zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
		zfs_refresh_properties(zhp);

		if (zfs_crypto_get_encryption_root(zhp, &encroot, NULL) != 0 &&
		    unmounted) {
			(void) zfs_mount(zhp, NULL, 0);
			return (-1);
		}

		if (encroot && zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
		    ZFS_KEYSTATUS_AVAILABLE &&
		    zfs_crypto_unload_key(zhp) != 0) {
			(void) zfs_mount(zhp, NULL, 0);
			return (-1);
		}
	}

	zpool_disable_volume_os(zhp->zfs_name);

	return (0);
}

/*
 * Unmount this filesystem and any children inheriting the mountpoint property.
 * To do this, just act like we're changing the mountpoint property, but don't
 * remount the filesystems afterwards.
 */
int
zfs_unmountall(zfs_handle_t *zhp, int flags)
{
	prop_changelist_t *clp;
	int ret;

	clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT,
	    CL_GATHER_ITER_MOUNTED, flags);
	if (clp == NULL)
		return (-1);

	ret = changelist_prefix(clp);
	changelist_free(clp);

	return (ret);
}

/*
 * Unshare a filesystem by mountpoint.
 */
static int
unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint,
    enum sa_protocol proto)
{
	int err = sa_disable_share(mountpoint, proto);
	if (err != SA_OK)
		return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err,
		    dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
		    name, sa_errorstr(err)));

	return (0);
}

/*
 * Share the given filesystem according to the options in the specified
 * protocol specific properties (sharenfs, sharesmb).  We rely
 * on "libshare" to do the dirty work for us.
 */
int
zfs_share(zfs_handle_t *zhp, const enum sa_protocol *proto)
{
	char mountpoint[ZFS_MAXPROPLEN];
	char shareopts[ZFS_MAXPROPLEN];
	char sourcestr[ZFS_MAXPROPLEN];
	const enum sa_protocol *curr_proto;
	zprop_source_t sourcetype;
	int err = 0;

	if (proto == NULL)
		proto = share_all_proto;

	if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL, 0))
		return (0);

	for (curr_proto = proto; *curr_proto != SA_NO_PROTOCOL; curr_proto++) {
		/*
		 * Return success if there are no share options.
		 */
		if (zfs_prop_get(zhp, proto_table[*curr_proto].p_prop,
		    shareopts, sizeof (shareopts), &sourcetype, sourcestr,
		    ZFS_MAXPROPLEN, B_FALSE) != 0 ||
		    strcmp(shareopts, "off") == 0)
			continue;

		/*
		 * If the 'zoned' property is set, then zfs_is_mountable()
		 * will have already bailed out if we are in the global zone.
		 * But local zones cannot be NFS servers, so we ignore it for
		 * local zones as well.
		 */
		if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED))
			continue;

		err = sa_enable_share(zfs_get_name(zhp), mountpoint, shareopts,
		    *curr_proto);
		if (err != SA_OK) {
			return (zfs_error_fmt(zhp->zfs_hdl,
			    proto_table[*curr_proto].p_share_err,
			    dgettext(TEXT_DOMAIN, "cannot share '%s': %s"),
			    zfs_get_name(zhp), sa_errorstr(err)));
		}

	}
	return (0);
}

/*
 * Check to see if the filesystem is currently shared.
 */
boolean_t
zfs_is_shared(zfs_handle_t *zhp, char **where,
    const enum sa_protocol *proto)
{
	char *mountpoint;
	if (proto == NULL)
		proto = share_all_proto;

	if (ZFS_IS_VOLUME(zhp))
		return (B_FALSE);

	if (!zfs_is_mounted(zhp, &mountpoint))
		return (B_FALSE);

	for (const enum sa_protocol *p = proto; *p != SA_NO_PROTOCOL; ++p)
		if (sa_is_shared(mountpoint, *p)) {
			if (where != NULL)
				*where = mountpoint;
			else
				free(mountpoint);
			return (B_TRUE);
		}

	free(mountpoint);
	return (B_FALSE);
}

void
zfs_commit_shares(const enum sa_protocol *proto)
{
	if (proto == NULL)
		proto = share_all_proto;

	for (const enum sa_protocol *p = proto; *p != SA_NO_PROTOCOL; ++p)
		sa_commit_shares(*p);
}

/*
 * Unshare the given filesystem.
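 * Only protocols that are actually shared at the mountpoint are torn down;
 * callers typically follow up with zfs_commit_shares() to make the change
 * take effect.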
 */
int
zfs_unshare(zfs_handle_t *zhp, const char *mountpoint,
    const enum sa_protocol *proto)
{
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	struct mnttab entry;

	if (proto == NULL)
		proto = share_all_proto;

	if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
	    libzfs_mnttab_find(hdl, zfs_get_name(zhp), &entry) == 0)) {

		/* determine the mountpoint to unshare */
		const char *mntpt = mountpoint ?: entry.mnt_mountp;

		for (const enum sa_protocol *curr_proto = proto;
		    *curr_proto != SA_NO_PROTOCOL; curr_proto++)
			if (sa_is_shared(mntpt, *curr_proto) &&
			    unshare_one(hdl, zhp->zfs_name,
			    mntpt, *curr_proto) != 0)
				return (-1);
	}

	return (0);
}

/*
 * Same as zfs_unmountall(), but for NFS and SMB unshares.
 */
int
zfs_unshareall(zfs_handle_t *zhp, const enum sa_protocol *proto)
{
	prop_changelist_t *clp;
	int ret;

	if (proto == NULL)
		proto = share_all_proto;

	clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0, 0);
	if (clp == NULL)
		return (-1);

	ret = changelist_unshare(clp, proto);
	changelist_free(clp);

	return (ret);
}

/*
 * Remove the mountpoint associated with the current dataset, if necessary.
 * We only remove the underlying directory if:
 *
 *	- The mountpoint is not 'none' or 'legacy'
 *	- The mountpoint is non-empty
 *	- The mountpoint is the default or inherited
 *	- The 'zoned' property is set, or we're in a local zone
 *
 * Any other directories we leave alone.
 */
void
remove_mountpoint(zfs_handle_t *zhp)
{
	char mountpoint[ZFS_MAXPROPLEN];
	zprop_source_t source;

	if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint),
	    &source, 0))
		return;

	if (source == ZPROP_SRC_DEFAULT ||
	    source == ZPROP_SRC_INHERITED) {
		/*
		 * Try to remove the directory, silently ignoring any errors.
		 * The filesystem may have since been removed or moved around,
		 * and this error isn't really useful to the administrator in
		 * any way.
		 */
		(void) rmdir(mountpoint);
	}
}

/*
 * Add the given zfs handle to the cb_handles array, dynamically reallocating
 * the array if it is out of space.
 */
void
libzfs_add_handle(get_all_cb_t *cbp, zfs_handle_t *zhp)
{
	if (cbp->cb_alloc == cbp->cb_used) {
		size_t newsz;
		zfs_handle_t **newhandles;

		newsz = cbp->cb_alloc != 0 ? cbp->cb_alloc * 2 : 64;
		newhandles = zfs_realloc(zhp->zfs_hdl,
		    cbp->cb_handles, cbp->cb_alloc * sizeof (zfs_handle_t *),
		    newsz * sizeof (zfs_handle_t *));
		cbp->cb_handles = newhandles;
		cbp->cb_alloc = newsz;
	}
	cbp->cb_handles[cbp->cb_used++] = zhp;
}

/*
 * Recursive helper function used during file system enumeration
 */
static int
zfs_iter_cb(zfs_handle_t *zhp, void *data)
{
	get_all_cb_t *cbp = data;

	if (!(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM)) {
		zfs_close(zhp);
		return (0);
	}

	if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_NOAUTO) {
		zfs_close(zhp);
		return (0);
	}

	if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
	    ZFS_KEYSTATUS_UNAVAILABLE) {
		zfs_close(zhp);
		return (0);
	}

	/*
	 * If this filesystem is inconsistent and has a receive resume
	 * token, we cannot mount it.
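	 * Skip it here (a resumable receive was interrupted) rather than
	 * failing the whole enumeration.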
	 */
	if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
	    zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
	    NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
		zfs_close(zhp);
		return (0);
	}

	libzfs_add_handle(cbp, zhp);
	if (zfs_iter_filesystems(zhp, zfs_iter_cb, cbp) != 0) {
		zfs_close(zhp);
		return (-1);
	}
	return (0);
}

/*
 * Sort comparator that compares two mountpoint paths.  We sort these paths so
 * that subdirectories immediately follow their parents.  This means that we
 * effectively treat the '/' character as the lowest value non-nul char.
 * Since filesystems from non-global zones can have the same mountpoint
 * as other filesystems, the comparator sorts global zone filesystems to
 * the top of the list.  This means that the global zone will traverse the
 * filesystem list in the correct order and can stop when it sees the
 * first zoned filesystem.  In a non-global zone, only the delegated
 * filesystems are seen.
 *
 * An example sorted list using this comparator would look like:
 *
 * /foo
 * /foo/bar
 * /foo/bar/baz
 * /foo/baz
 * /foo.bar
 * /foo (NGZ1)
 * /foo (NGZ2)
 *
 * The mounting code depends on this ordering to deterministically iterate
 * over filesystems in order to spawn parallel mount tasks.
 */
static int
mountpoint_cmp(const void *arga, const void *argb)
{
	zfs_handle_t *const *zap = arga;
	zfs_handle_t *za = *zap;
	zfs_handle_t *const *zbp = argb;
	zfs_handle_t *zb = *zbp;
	char mounta[MAXPATHLEN];
	char mountb[MAXPATHLEN];
	const char *a = mounta;
	const char *b = mountb;
	boolean_t gota, gotb;
	uint64_t zoneda, zonedb;

	zoneda = zfs_prop_get_int(za, ZFS_PROP_ZONED);
	zonedb = zfs_prop_get_int(zb, ZFS_PROP_ZONED);
	if (zoneda && !zonedb)
		return (1);
	if (!zoneda && zonedb)
		return (-1);

	gota = (zfs_get_type(za) == ZFS_TYPE_FILESYSTEM);
	if (gota) {
		verify(zfs_prop_get(za, ZFS_PROP_MOUNTPOINT, mounta,
		    sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
	}
	gotb = (zfs_get_type(zb) == ZFS_TYPE_FILESYSTEM);
	if (gotb) {
		verify(zfs_prop_get(zb, ZFS_PROP_MOUNTPOINT, mountb,
		    sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
	}

	if (gota && gotb) {
		while (*a != '\0' && (*a == *b)) {
			a++;
			b++;
		}
		if (*a == *b)
			return (0);
		if (*a == '\0')
			return (-1);
		if (*b == '\0')
			return (1);
		if (*a == '/')
			return (-1);
		if (*b == '/')
			return (1);
		return (*a < *b ? -1 : *a > *b);
	}

	if (gota)
		return (-1);
	if (gotb)
		return (1);

	/*
	 * If neither filesystem has a mountpoint, revert to sorting by
	 * dataset name.
	 */
	return (strcmp(zfs_get_name(za), zfs_get_name(zb)));
}

/*
 * Return true if path2 is a child of path1 or path2 equals path1 or
 * path1 is "/" (path2 is always a child of "/").
 */
static boolean_t
libzfs_path_contains(const char *path1, const char *path2)
{
	return (strcmp(path1, path2) == 0 || strcmp(path1, "/") == 0 ||
	    (strstr(path2, path1) == path2 && path2[strlen(path1)] == '/'));
}

/*
 * Given a mountpoint specified by idx in the handles array, find the first
 * non-descendent of that mountpoint and return its index.  Descendant paths
 * start with the parent's path.  This function relies on the ordering
 * enforced by mountpoint_cmp().
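 * For example, given the sorted mountpoints { /a, /a/b, /a/b/c, /d } and idx
 * pointing at /a, the index of /d is returned.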
 */
static int
non_descendant_idx(zfs_handle_t **handles, size_t num_handles, int idx)
{
	char parent[ZFS_MAXPROPLEN];
	char child[ZFS_MAXPROPLEN];
	int i;

	verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, parent,
	    sizeof (parent), NULL, NULL, 0, B_FALSE) == 0);

	for (i = idx + 1; i < num_handles; i++) {
		verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT, child,
		    sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
		if (!libzfs_path_contains(parent, child))
			break;
	}
	return (i);
}

typedef struct mnt_param {
	libzfs_handle_t	*mnt_hdl;
	tpool_t		*mnt_tp;
	zfs_handle_t	**mnt_zhps;	/* filesystems to mount */
	size_t		mnt_num_handles;
	int		mnt_idx;	/* Index of selected entry to mount */
	zfs_iter_f	mnt_func;
	void		*mnt_data;
} mnt_param_t;

/*
 * Allocate and populate the parameter struct for mount function, and
 * schedule mounting of the entry selected by idx.
 */
static void
zfs_dispatch_mount(libzfs_handle_t *hdl, zfs_handle_t **handles,
    size_t num_handles, int idx, zfs_iter_f func, void *data, tpool_t *tp)
{
	mnt_param_t *mnt_param = zfs_alloc(hdl, sizeof (mnt_param_t));

	mnt_param->mnt_hdl = hdl;
	mnt_param->mnt_tp = tp;
	mnt_param->mnt_zhps = handles;
	mnt_param->mnt_num_handles = num_handles;
	mnt_param->mnt_idx = idx;
	mnt_param->mnt_func = func;
	mnt_param->mnt_data = data;

	(void) tpool_dispatch(tp, zfs_mount_task, (void*)mnt_param);
}

/*
 * This is the structure used to keep state of mounting or sharing operations
 * during a call to zpool_enable_datasets().
 */
typedef struct mount_state {
	/*
	 * ms_mntstatus is set to -1 if any mount fails.  While multiple
	 * threads could update this variable concurrently, no synchronization
	 * is needed as it's only ever set to -1.
	 */
	int		ms_mntstatus;
	int		ms_mntflags;
	const char	*ms_mntopts;
} mount_state_t;

static int
zfs_mount_one(zfs_handle_t *zhp, void *arg)
{
	mount_state_t *ms = arg;
	int ret = 0;

	/*
	 * don't attempt to mount encrypted datasets with
	 * unloaded keys
	 */
	if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
	    ZFS_KEYSTATUS_UNAVAILABLE)
		return (0);

	if (zfs_mount(zhp, ms->ms_mntopts, ms->ms_mntflags) != 0)
		ret = ms->ms_mntstatus = -1;
	return (ret);
}

static int
zfs_share_one(zfs_handle_t *zhp, void *arg)
{
	mount_state_t *ms = arg;
	int ret = 0;

	if (zfs_share(zhp, NULL) != 0)
		ret = ms->ms_mntstatus = -1;
	return (ret);
}

/*
 * Thread pool function to mount one file system.  On completion, it finds and
 * schedules its children to be mounted.  This depends on the sorting done in
 * zfs_foreach_mountpoint().  Note that the degenerate case (chain of entries
 * each descending from the previous) will have no parallelism since we always
 * have to wait for the parent to finish mounting before we can schedule
 * its children.
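 * Conversely, sibling mountpoints (e.g. /a/b and /a/c) are dispatched
 * concurrently once /a has been mounted.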
 */
static void
zfs_mount_task(void *arg)
{
	mnt_param_t *mp = arg;
	int idx = mp->mnt_idx;
	zfs_handle_t **handles = mp->mnt_zhps;
	size_t num_handles = mp->mnt_num_handles;
	char mountpoint[ZFS_MAXPROPLEN];

	verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, mountpoint,
	    sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);

	if (mp->mnt_func(handles[idx], mp->mnt_data) != 0)
		goto out;

	/*
	 * We dispatch tasks to mount filesystems with mountpoints underneath
	 * this one.  We do this by dispatching the next filesystem with a
	 * descendant mountpoint of the one we just mounted, then skip all of
	 * its descendants, dispatch the next descendant mountpoint, and so on.
	 * The non_descendant_idx() function skips over filesystems that are
	 * descendants of the filesystem we just dispatched.
	 */
	for (int i = idx + 1; i < num_handles;
	    i = non_descendant_idx(handles, num_handles, i)) {
		char child[ZFS_MAXPROPLEN];
		verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT,
		    child, sizeof (child), NULL, NULL, 0, B_FALSE) == 0);

		if (!libzfs_path_contains(mountpoint, child))
			break; /* not a descendant, return */
		zfs_dispatch_mount(mp->mnt_hdl, handles, num_handles, i,
		    mp->mnt_func, mp->mnt_data, mp->mnt_tp);
	}

out:
	free(mp);
}

/*
 * Issue the func callback for each ZFS handle contained in the handles
 * array.  This function is used to mount all datasets, and so this function
 * guarantees that filesystems for parent mountpoints are called before their
 * children.  As such, before issuing any callbacks, we first sort the array
 * of handles by mountpoint.
 *
 * Callbacks are issued in one of two ways:
 *
 * 1. Sequentially: If the parallel argument is B_FALSE or the ZFS_SERIAL_MOUNT
 *    environment variable is set, then we issue callbacks sequentially.
 *
 * 2. In parallel: If the parallel argument is B_TRUE and the ZFS_SERIAL_MOUNT
 *    environment variable is not set, then we use a tpool to dispatch threads
 *    to mount filesystems in parallel.  This function dispatches tasks to
 *    mount the filesystems at the top-level mountpoints, and these tasks in
 *    turn are responsible for recursively mounting filesystems in their
 *    children mountpoints.
 */
void
zfs_foreach_mountpoint(libzfs_handle_t *hdl, zfs_handle_t **handles,
    size_t num_handles, zfs_iter_f func, void *data, boolean_t parallel)
{
	zoneid_t zoneid = getzoneid();

	/*
	 * The ZFS_SERIAL_MOUNT environment variable is an undocumented
	 * variable that can be used as a convenience to do a/b comparison
	 * of serial vs. parallel mounting.
	 */
	boolean_t serial_mount = !parallel ||
	    (getenv("ZFS_SERIAL_MOUNT") != NULL);

	/*
	 * Sort the datasets by mountpoint.  See mountpoint_cmp for details
	 * of how these are sorted.
	 */
	qsort(handles, num_handles, sizeof (zfs_handle_t *), mountpoint_cmp);

	if (serial_mount) {
		for (int i = 0; i < num_handles; i++) {
			func(handles[i], data);
		}
		return;
	}

	/*
	 * Issue the callback function for each dataset using a parallel
	 * algorithm that uses a thread pool to manage threads.
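	 * The pool grows on demand, up to mount_tp_nthr (512) threads.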
	 */
	tpool_t *tp = tpool_create(1, mount_tp_nthr, 0, NULL);

	/*
	 * There may be multiple "top level" mountpoints outside of the pool's
	 * root mountpoint, e.g.: /foo /bar.  Dispatch a mount task for each of
	 * these.
	 */
	for (int i = 0; i < num_handles;
	    i = non_descendant_idx(handles, num_handles, i)) {
		/*
		 * Since the mountpoints have been sorted so that the zoned
		 * filesystems are at the end, a zoned filesystem seen from
		 * the global zone means that we're done.
		 */
		if (zoneid == GLOBAL_ZONEID &&
		    zfs_prop_get_int(handles[i], ZFS_PROP_ZONED))
			break;
		zfs_dispatch_mount(hdl, handles, num_handles, i, func, data,
		    tp);
	}

	tpool_wait(tp);	/* wait for all scheduled mounts to complete */
	tpool_destroy(tp);
}

/*
 * Mount and share all datasets within the given pool.  This assumes that no
 * datasets within the pool are currently mounted.
 */
int
zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
{
	get_all_cb_t cb = { 0 };
	mount_state_t ms = { 0 };
	zfs_handle_t *zfsp;
	int ret = 0;

	if ((zfsp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_DATASET)) == NULL)
		goto out;

	/*
	 * Gather all non-snapshot datasets within the pool.  Start by adding
	 * the root filesystem for this pool to the list, and then iterate
	 * over all child filesystems.
	 */
	libzfs_add_handle(&cb, zfsp);
	if (zfs_iter_filesystems(zfsp, zfs_iter_cb, &cb) != 0)
		goto out;

	/*
	 * Mount all filesystems
	 */
	ms.ms_mntopts = mntopts;
	ms.ms_mntflags = flags;
	zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
	    zfs_mount_one, &ms, B_TRUE);
	if (ms.ms_mntstatus != 0)
		ret = ms.ms_mntstatus;

	/*
	 * Share all filesystems that need to be shared.  This needs to be
	 * a separate pass because libshare is not mt-safe, and so we need
	 * to share serially.
	 */
	ms.ms_mntstatus = 0;
	zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
	    zfs_share_one, &ms, B_FALSE);
	if (ms.ms_mntstatus != 0)
		ret = ms.ms_mntstatus;
	else
		zfs_commit_shares(NULL);

out:
	for (int i = 0; i < cb.cb_used; i++)
		zfs_close(cb.cb_handles[i]);
	free(cb.cb_handles);

	return (ret);
}

struct sets_s {
	char		*mountpoint;
	zfs_handle_t	*dataset;
};

static int
mountpoint_compare(const void *a, const void *b)
{
	const struct sets_s *mounta = (struct sets_s *)a;
	const struct sets_s *mountb = (struct sets_s *)b;

	return (strcmp(mountb->mountpoint, mounta->mountpoint));
}

/*
 * Unshare and unmount all datasets within the given pool.  We don't want to
 * rely on traversing the DSL to discover the filesystems within the pool,
 * because this may be expensive (if not all of them are mounted), and can
 * fail arbitrarily (on I/O error, for example).  Instead, we walk
 * /proc/self/mounts and gather all the filesystems that are currently
 * mounted.
 */
int
zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
{
	int used, alloc;
	FILE *mnttab;
	struct mnttab entry;
	size_t namelen;
	struct sets_s *sets = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	int i;
	int ret = -1;
	int flags = (force ? MS_FORCE : 0);

	namelen = strlen(zhp->zpool_name);

	if ((mnttab = fopen(MNTTAB, "re")) == NULL)
		return (ENOENT);

	used = alloc = 0;
	while (getmntent(mnttab, &entry) == 0) {
		/*
		 * Ignore non-ZFS entries.
		 */
		if (entry.mnt_fstype == NULL ||
		    strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
			continue;

		/*
		 * Ignore filesystems not within this pool.
		 */
		if (entry.mnt_mountp == NULL ||
		    strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 ||
		    (entry.mnt_special[namelen] != '/' &&
		    entry.mnt_special[namelen] != '\0'))
			continue;

		/*
		 * At this point we've found a filesystem within our pool.  Add
		 * it to our growing list.
		 */
		if (used == alloc) {
			if (alloc == 0) {
				sets = zfs_alloc(hdl,
				    8 * sizeof (struct sets_s));
				alloc = 8;
			} else {
				sets = zfs_realloc(hdl, sets,
				    alloc * sizeof (struct sets_s),
				    alloc * 2 * sizeof (struct sets_s));

				alloc *= 2;
			}
		}

		sets[used].mountpoint = zfs_strdup(hdl, entry.mnt_mountp);

		/*
		 * This is allowed to fail, in case there is some I/O error.
		 * It is only used to determine if we need to remove the
		 * underlying mountpoint, so failure is not fatal.
		 */
		sets[used].dataset = make_dataset_handle(hdl,
		    entry.mnt_special);

		used++;
	}

	/*
	 * At this point, we have the entire list of filesystems, so sort it by
	 * mountpoint.
	 */
	if (used != 0)
		qsort(sets, used, sizeof (struct sets_s), mountpoint_compare);

	/*
	 * Walk through and first unshare everything.
	 */
	for (i = 0; i < used; i++) {
		for (enum sa_protocol p = 0; p < SA_PROTOCOL_COUNT; ++p) {
			if (sa_is_shared(sets[i].mountpoint, p) &&
			    unshare_one(hdl, sets[i].mountpoint,
			    sets[i].mountpoint, p) != 0)
				goto out;
		}
	}
	zfs_commit_shares(NULL);

	/*
	 * Now unmount everything, removing the underlying directories as
	 * appropriate.
	 */
	for (i = 0; i < used; i++) {
		if (unmount_one(sets[i].dataset, sets[i].mountpoint,
		    flags) != 0)
			goto out;
	}

	for (i = 0; i < used; i++) {
		if (sets[i].dataset)
			remove_mountpoint(sets[i].dataset);
	}

	zpool_disable_datasets_os(zhp, force);

	ret = 0;
out:
	(void) fclose(mnttab);
	for (i = 0; i < used; i++) {
		if (sets[i].dataset)
			zfs_close(sets[i].dataset);
		free(sets[i].mountpoint);
	}
	free(sets);

	return (ret);
}
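
/*
 * Illustrative sketch (not part of the library) of how a pool consumer is
 * expected to drive the entry points above; the pool name "tank" is
 * hypothetical and error handling is omitted:
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *
 *	// Mount and share every dataset in the pool (e.g. after import).
 *	if (zpool_enable_datasets(zhp, NULL, 0) != 0)
 *		(void) fprintf(stderr, "some datasets could not be mounted\n");
 *
 *	// ... later, before export: unshare and unmount everything.
 *	if (zpool_disable_datasets(zhp, B_FALSE) != 0)
 *		(void) fprintf(stderr, "pool is busy\n");
 *
 *	zpool_close(zhp);
 *	libzfs_fini(hdl);
 */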