/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"

/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
static int
zpool_name_valid(const char *pool, char *buf, size_t buflen)
{
        namecheck_err_t why;
        char what;

        if (strlen(pool) >= ZPOOL_MAXNAMELEN) {
                if (buf)
                        (void) snprintf(buf, buflen,
                            dgettext(TEXT_DOMAIN, "name is too long"));
                return (FALSE);
        }

        if (pool_namecheck(pool, &why, &what) != 0) {
                if (buf != NULL) {
                        switch (why) {
                        case NAME_ERR_INVALCHAR:
                                (void) snprintf(buf, buflen,
                                    dgettext(TEXT_DOMAIN, "invalid character "
                                    "'%c' in pool name"), what);
                                break;

                        case NAME_ERR_NOLETTER:
                                (void) strlcpy(buf, dgettext(TEXT_DOMAIN,
                                    "name must begin with a letter"), buflen);
                                break;

                        case NAME_ERR_RESERVED:
                                (void) strlcpy(buf, dgettext(TEXT_DOMAIN,
                                    "name is reserved\n"
                                    "pool name may have been omitted"), buflen);
                                break;

                        case NAME_ERR_DISKLIKE:
                                (void) strlcpy(buf, dgettext(TEXT_DOMAIN,
                                    "pool name is reserved\n"
                                    "pool name may have been omitted"), buflen);
                                break;
                        }
                }
                return (FALSE);
        }

        return (TRUE);
}

/*
 * Set the pool-wide health based on the vdev state of the root vdev.
 */
void
set_pool_health(nvlist_t *config)
{
        nvlist_t *nvroot;
        vdev_stat_t *vs;
        uint_t vsc;
        char *health;

        verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);
        verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
            (uint64_t **)&vs, &vsc) == 0);

        switch (vs->vs_state) {

        case VDEV_STATE_CLOSED:
        case VDEV_STATE_CANT_OPEN:
        case VDEV_STATE_OFFLINE:
                health = dgettext(TEXT_DOMAIN, "FAULTED");
                break;

        case VDEV_STATE_DEGRADED:
                health = dgettext(TEXT_DOMAIN, "DEGRADED");
                break;

        case VDEV_STATE_HEALTHY:
                health = dgettext(TEXT_DOMAIN, "ONLINE");
                break;

        default:
                zfs_baderror(vs->vs_state);
        }

        verify(nvlist_add_string(config, ZPOOL_CONFIG_POOL_HEALTH,
            health) == 0);
}
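
/*
 * Illustrative sketch (compiled out): how a consumer might read back the
 * health string that set_pool_health() stores in the config.  The config
 * is assumed to have already been passed through set_pool_health().
 */
#if 0
static void
example_print_health(nvlist_t *config)
{
        char *health;

        /* ZPOOL_CONFIG_POOL_HEALTH was added by set_pool_health() above */
        verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_HEALTH,
            &health) == 0);
        (void) printf("pool health: %s\n", health);
}
#endif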
/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(const char *pool)
{
        zpool_handle_t *zhp;
        int error;

        /*
         * Make sure the pool name is valid.
         */
        if (!zpool_name_valid(pool, NULL, 0)) {
                zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': invalid "
                    "pool name"), pool);
                return (NULL);
        }

        zhp = zfs_malloc(sizeof (zpool_handle_t));

        (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

        if ((error = zpool_refresh_stats(zhp)) != 0) {
                if (error == ENOENT || error == EINVAL) {
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': no "
                            "such pool"), pool);
                        free(zhp);
                        return (NULL);
                } else {
                        zhp->zpool_state = POOL_STATE_UNAVAIL;
                }
        } else {
                zhp->zpool_state = POOL_STATE_ACTIVE;
        }

        return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
zpool_handle_t *
zpool_open_silent(const char *pool)
{
        zpool_handle_t *zhp;
        int error;

        zhp = zfs_malloc(sizeof (zpool_handle_t));

        (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

        if ((error = zpool_refresh_stats(zhp)) != 0) {
                if (error == ENOENT || error == EINVAL) {
                        free(zhp);
                        return (NULL);
                } else {
                        zhp->zpool_state = POOL_STATE_UNAVAIL;
                }
        } else {
                zhp->zpool_state = POOL_STATE_ACTIVE;
        }

        return (zhp);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(const char *pool)
{
        zpool_handle_t *zhp;

        if ((zhp = zpool_open_canfail(pool)) == NULL)
                return (NULL);

        if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
                zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': pool is "
                    "currently unavailable\n"), zhp->zpool_name);
                zfs_error(dgettext(TEXT_DOMAIN, "run 'zpool status -v %s' for "
                    "detailed information\n"), zhp->zpool_name);
                zpool_close(zhp);
                return (NULL);
        }

        return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
        if (zhp->zpool_config)
                nvlist_free(zhp->zpool_config);
        if (zhp->zpool_old_config)
                nvlist_free(zhp->zpool_old_config);
        free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
        return (zhp->zpool_name);
}

/*
 * Return the GUID of the pool.
 */
uint64_t
zpool_get_guid(zpool_handle_t *zhp)
{
        uint64_t guid;

        verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
            &guid) == 0);
        return (guid);
}

/*
 * Return the amount of space currently consumed by the pool.
 */
uint64_t
zpool_get_space_used(zpool_handle_t *zhp)
{
        nvlist_t *nvroot;
        vdev_stat_t *vs;
        uint_t vsc;

        verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);
        verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
            (uint64_t **)&vs, &vsc) == 0);

        return (vs->vs_alloc);
}
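
/*
 * Illustrative sketch (compiled out): the typical open/use/close lifecycle
 * for the handle functions above.  "tank" is a hypothetical pool name.
 */
#if 0
static int
example_print_guid(void)
{
        zpool_handle_t *zhp;

        /* zpool_open() fails (with an error message) for faulted pools */
        if ((zhp = zpool_open("tank")) == NULL)
                return (-1);

        (void) printf("%s: guid %llu\n", zpool_get_name(zhp),
            (u_longlong_t)zpool_get_guid(zhp));

        zpool_close(zhp);
        return (0);
}
#endif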
/*
 * Return the total space in the pool.
 */
uint64_t
zpool_get_space_total(zpool_handle_t *zhp)
{
        nvlist_t *nvroot;
        vdev_stat_t *vs;
        uint_t vsc;

        verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);
        verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
            (uint64_t **)&vs, &vsc) == 0);

        return (vs->vs_space);
}

/*
 * Return the alternate root for this pool, if any.
 */
int
zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
{
        zfs_cmd_t zc = { 0 };

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        if (ioctl(zfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
            zc.zc_objset_stats.dds_altroot[0] == '\0')
                return (-1);

        (void) strlcpy(buf, zc.zc_objset_stats.dds_altroot, buflen);

        return (0);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
        return (zhp->zpool_state);
}
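
/*
 * Illustrative sketch (compiled out): computing percent-used from the two
 * space accessors above, the way a status display might.
 */
#if 0
static uint64_t
example_capacity(zpool_handle_t *zhp)
{
        uint64_t used = zpool_get_space_used(zhp);
        uint64_t total = zpool_get_space_total(zhp);

        /* total is nonzero for any pool with at least one vdev */
        return ((used * 100) / total);
}
#endif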
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(const char *pool, nvlist_t *nvroot, const char *altroot)
{
        zfs_cmd_t zc = { 0 };
        char *packed;
        size_t len;
        int err;
        char reason[64];

        if (!zpool_name_valid(pool, reason, sizeof (reason))) {
                zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': %s"),
                    pool, reason);
                return (-1);
        }

        if (altroot != NULL && altroot[0] != '/') {
                zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': alternate "
                    "root '%s' must be a complete path"), pool, altroot);
                return (-1);
        }

        if ((err = nvlist_size(nvroot, &len, NV_ENCODE_NATIVE)) != 0)
                zfs_baderror(err);

        packed = zfs_malloc(len);

        if ((err = nvlist_pack(nvroot, &packed, &len,
            NV_ENCODE_NATIVE, 0)) != 0)
                zfs_baderror(err);

        (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
        zc.zc_config_src = (uint64_t)(uintptr_t)packed;
        zc.zc_config_src_size = len;

        if (altroot != NULL)
                (void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));

        if (ioctl(zfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
                switch (errno) {
                case EEXIST:
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
                            "pool exists"), pool);
                        break;

                case EPERM:
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
                            "permission denied"), pool);
                        break;

                case EBUSY:
                        /*
                         * This can happen if the user has specified the same
                         * device multiple times.  We can't reliably detect this
                         * until we try to add it and see we already have a
                         * label.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
                            "one or more vdevs refer to the same device"),
                            pool);
                        break;

                case EOVERFLOW:
                        /*
                         * This occurs when one of the devices is below
                         * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
                         * device was the problem device since there's no
                         * reliable way to determine device size from userland.
                         */
                        {
                                char buf[64];

                                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                                zfs_error(dgettext(TEXT_DOMAIN, "cannot "
                                    "create '%s': one or more devices is less "
                                    "than the minimum size (%s)"), pool,
                                    buf);
                        }
                        break;

                case ENAMETOOLONG:
                        /*
                         * One of the vdevs has exceeded VDEV_SPEC_MAX length in
                         * its plaintext representation.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
                            "too many devices in a single vdev"), pool);
                        break;

                case EIO:
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
                            "I/O error on one or more devices"), pool);
                        break;

                case ENXIO:
                        /*
                         * This is unlikely to happen since we've verified that
                         * all the devices can be opened from userland, but it's
                         * still possible in some circumstances.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
                            "one or more devices is unavailable"), pool);
                        break;

                case ENOSPC:
                        /*
                         * This can occur if we were incapable of writing to a
                         * file vdev because the underlying filesystem is out of
                         * space.  This is very similar to EOVERFLOW, but we'll
                         * produce a slightly different message.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
                            "one or more devices is out of space"), pool);
                        break;

                default:
                        zfs_baderror(errno);
                }

                free(packed);
                return (-1);
        }

        free(packed);

        /*
         * If this is an alternate root pool, then we automatically set the
         * mountpoint of the root dataset to be '/'.
         */
        if (altroot != NULL) {
                zfs_handle_t *zhp;

                verify((zhp = zfs_open(pool, ZFS_TYPE_ANY)) != NULL);
                verify(zfs_prop_set(zhp, ZFS_PROP_MOUNTPOINT, "/") == 0);

                zfs_close(zhp);
        }

        return (0);
}
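
/*
 * Illustrative sketch (compiled out): the rough shape of the 'nvroot'
 * argument that zpool_create() expects -- a root vdev with an array of
 * children.  The real consumer (the zpool command) also fills in fields
 * such as devids; the pool name and device path are hypothetical.
 */
#if 0
static int
example_create_single_disk(void)
{
        nvlist_t *root, *disk;
        int ret;

        verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
        verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_DISK) == 0);
        verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
            "/dev/dsk/c1t0d0s0") == 0);

        verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
        verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_ROOT) == 0);
        verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
            &disk, 1) == 0);

        ret = zpool_create("tank", root, NULL);

        nvlist_free(disk);
        nvlist_free(root);
        return (ret);
}
#endif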
/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
        zfs_cmd_t zc = { 0 };
        zfs_handle_t *zfp = NULL;

        if (zhp->zpool_state == POOL_STATE_ACTIVE &&
            (zfp = zfs_open(zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
                return (-1);

        if (zpool_remove_zvol_links(zhp) != 0)
                return (-1);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (ioctl(zfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
                switch (errno) {
                case EPERM:
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "cannot destroy '%s': permission denied"),
                            zhp->zpool_name);
                        break;

                case EBUSY:
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "cannot destroy '%s': pool busy"),
                            zhp->zpool_name);
                        break;

                case ENOENT:
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "cannot destroy '%s': no such pool"),
                            zhp->zpool_name);
                        break;

                case EROFS:
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "cannot destroy '%s': one or more devices is "
                            "read only, or '/' is mounted read only"),
                            zhp->zpool_name);
                        break;

                default:
                        zfs_baderror(errno);
                }

                if (zfp)
                        zfs_close(zfp);
                return (-1);
        }

        if (zfp) {
                remove_mountpoint(zfp);
                zfs_close(zfp);
        }

        return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
        char *packed;
        size_t len;
        zfs_cmd_t zc = { 0 };

        verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

        packed = zfs_malloc(len);

        verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        zc.zc_config_src = (uint64_t)(uintptr_t)packed;
        zc.zc_config_src_size = len;

        if (ioctl(zfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
                switch (errno) {
                case EPERM:
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
                            "permission denied"), zhp->zpool_name);
                        break;

                case EBUSY:
                        /*
                         * This can happen if the user has specified the same
                         * device multiple times.  We can't reliably detect this
                         * until we try to add it and see we already have a
                         * label.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
                            "one or more vdevs refer to the same device"),
                            zhp->zpool_name);
                        break;

                case ENAMETOOLONG:
                        /*
                         * One of the vdevs has exceeded VDEV_SPEC_MAX length in
                         * its plaintext representation.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
                            "too many devices in a single vdev"),
                            zhp->zpool_name);
                        break;

                case ENXIO:
                        /*
                         * This is unlikely to happen since we've verified that
                         * all the devices can be opened from userland, but it's
                         * still possible in some circumstances.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
                            "one or more devices is unavailable"),
                            zhp->zpool_name);
                        break;

                case EOVERFLOW:
                        /*
                         * This occurs when one of the devices is below
                         * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
                         * device was the problem device since there's no
                         * reliable way to determine device size from userland.
                         */
                        {
                                char buf[64];

                                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                                zfs_error(dgettext(TEXT_DOMAIN, "cannot "
                                    "add to '%s': one or more devices is less "
                                    "than the minimum size (%s)"),
                                    zhp->zpool_name, buf);
                        }
                        break;

                default:
                        zfs_baderror(errno);
                }

                free(packed);
                return (-1);
        }

        free(packed);

        return (0);
}
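
/*
 * Illustrative sketch (compiled out): zpool_create(), zpool_add(),
 * zpool_import(), and zpool_vdev_attach() all marshal an nvlist into a
 * contiguous buffer and hand it to the kernel through zc_config_src.
 * The helper name is hypothetical; the functions in this file simply
 * inline this pattern.
 */
#if 0
static char *
example_pack_config(nvlist_t *nvl, zfs_cmd_t *zc)
{
        char *packed;
        size_t len;

        /* compute the encoded size, then pack into a buffer of that size */
        verify(nvlist_size(nvl, &len, NV_ENCODE_NATIVE) == 0);
        packed = zfs_malloc(len);
        verify(nvlist_pack(nvl, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

        /* the kernel copies the buffer in; the caller must free() it */
        zc->zc_config_src = (uint64_t)(uintptr_t)packed;
        zc->zc_config_src_size = len;
        return (packed);
}
#endif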
/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export(zpool_handle_t *zhp)
{
        zfs_cmd_t zc = { 0 };

        if (zpool_remove_zvol_links(zhp) != 0)
                return (-1);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (ioctl(zfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
                switch (errno) {
                case EPERM:
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "cannot export '%s': permission denied"),
                            zhp->zpool_name);
                        break;

                case EBUSY:
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "cannot export '%s': pool is in use"),
                            zhp->zpool_name);
                        break;

                case ENOENT:
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "cannot export '%s': no such pool"),
                            zhp->zpool_name);
                        break;

                default:
                        zfs_baderror(errno);
                }

                return (-1);
        }

        return (0);
}

/*
 * Import the given pool using the known configuration.  The configuration
 * should have come from zpool_find_import().  The 'newname' and 'altroot'
 * parameters control whether the pool is imported with a different name or
 * with an alternate root, respectively.
 */
int
zpool_import(nvlist_t *config, const char *newname, const char *altroot)
{
        zfs_cmd_t zc = { 0 };
        char *packed;
        size_t len;
        char *thename;
        char *origname;
        int ret;

        verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
            &origname) == 0);

        if (newname != NULL) {
                if (!zpool_name_valid(newname, NULL, 0)) {
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot import '%s': "
                            "invalid pool name"), newname);
                        return (-1);
                }
                thename = (char *)newname;
        } else {
                thename = origname;
        }

        if (altroot != NULL && altroot[0] != '/') {
                zfs_error(dgettext(TEXT_DOMAIN, "cannot import '%s': alternate "
                    "root '%s' must be a complete path"), thename,
                    altroot);
                return (-1);
        }

        (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

        if (altroot != NULL)
                (void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
        else
                zc.zc_root[0] = '\0';

        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
            &zc.zc_pool_guid) == 0);

        verify(nvlist_size(config, &len, NV_ENCODE_NATIVE) == 0);

        packed = zfs_malloc(len);

        verify(nvlist_pack(config, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

        zc.zc_config_src = (uint64_t)(uintptr_t)packed;
        zc.zc_config_src_size = len;

        ret = 0;
        if (ioctl(zfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
                char desc[1024];
                if (newname == NULL)
                        (void) snprintf(desc, sizeof (desc),
                            dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                            thename);
                else
                        (void) snprintf(desc, sizeof (desc),
                            dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
                            origname, thename);

                switch (errno) {
                case EEXIST:
                        /*
                         * A pool with that name already exists.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "%s: pool exists"),
                            desc);
                        break;

                case EPERM:
                        /*
                         * The user doesn't have permission to create pools.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "%s: permission "
                            "denied"), desc);
                        break;

                case ENXIO:
                case EDOM:
                        /*
                         * Device is unavailable, or vdev sum didn't match.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "%s: one or more "
                            "devices is unavailable"),
                            desc);
                        break;

                default:
                        zfs_baderror(errno);
                }

                ret = -1;
        } else {
                zpool_handle_t *zhp;
                /*
                 * This should never fail, but play it safe anyway.
                 */
                if ((zhp = zpool_open_silent(thename)) != NULL) {
                        ret = zpool_create_zvol_links(zhp);
                        zpool_close(zhp);
                }
        }

        free(packed);
        return (ret);
}
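
/*
 * Illustrative sketch (compiled out): importing a pool under a new name
 * with an alternate root.  'config' is assumed to be a single-pool
 * configuration obtained from zpool_find_import(); the names are
 * hypothetical.
 */
#if 0
static int
example_import_renamed(nvlist_t *config)
{
        /* import the pool as 'newtank', rooted at /mnt */
        return (zpool_import(config, "newtank", "/mnt"));
}
#endif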
/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
        zfs_cmd_t zc = { 0 };
        char msg[1024];

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        zc.zc_cookie = type;

        if (ioctl(zfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
                return (0);

        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

        switch (errno) {
        case EPERM:
                /*
                 * No permission to scrub this pool.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
                break;

        case EBUSY:
                /*
                 * Resilver in progress.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: currently resilvering"),
                    msg);
                break;

        default:
                zfs_baderror(errno);
        }
        return (-1);
}

/*
 * Bring the specified vdev online.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path)
{
        zfs_cmd_t zc = { 0 };
        char msg[1024];

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        (void) snprintf(zc.zc_prop_value, sizeof (zc.zc_prop_value),
            "%s%s", path[0] == '/' ? "" : "/dev/dsk/", path);

        if (ioctl(zfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
                return (0);

        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot online %s"), zc.zc_prop_value);

        switch (errno) {
        case ENODEV:
                /*
                 * Device doesn't exist.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
                break;

        case EPERM:
                /*
                 * No permission to bring this vdev online.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
                break;

        default:
                zfs_baderror(errno);
        }
        return (-1);
}

/*
 * Take the specified vdev offline.
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path)
{
        zfs_cmd_t zc = { 0 };
        char msg[1024];

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        (void) snprintf(zc.zc_prop_value, sizeof (zc.zc_prop_value),
            "%s%s", path[0] == '/' ? "" : "/dev/dsk/", path);

        if (ioctl(zfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
                return (0);

        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot offline %s"), zc.zc_prop_value);

        switch (errno) {
        case ENODEV:
                /*
                 * Device doesn't exist.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
                break;

        case EPERM:
                /*
                 * No permission to take this vdev offline.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
                break;

        case EBUSY:
                /*
                 * There are no other replicas of this device.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: no valid replicas"), msg);
                break;

        default:
                zfs_baderror(errno);
        }
        return (-1);
}
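
/*
 * Illustrative sketch (compiled out): callers may pass either a full path
 * or a short device name to the online/offline functions above; names not
 * beginning with '/' get "/dev/dsk/" prepended.  The device name is
 * hypothetical.
 */
#if 0
static int
example_cycle_device(zpool_handle_t *zhp)
{
        /* equivalent to passing "/dev/dsk/c1t0d0s0" */
        if (zpool_vdev_offline(zhp, "c1t0d0s0") != 0)
                return (-1);

        return (zpool_vdev_online(zhp, "c1t0d0s0"));
}
#endif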
"" : "/dev/dsk/", old_disk); 926 zc.zc_cookie = replacing; 927 928 verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0); 929 930 packed = zfs_malloc(len); 931 932 verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0); 933 934 zc.zc_config_src = (uint64_t)(uintptr_t)packed; 935 zc.zc_config_src_size = len; 936 937 ret = ioctl(zfs_fd, ZFS_IOC_VDEV_ATTACH, &zc); 938 939 free(packed); 940 941 if (ret == 0) 942 return (0); 943 944 if (replacing) 945 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 946 "cannot replace %s with %s"), old_disk, new_disk); 947 else 948 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 949 "cannot attach %s to %s"), new_disk, old_disk); 950 951 switch (errno) { 952 case EPERM: 953 /* 954 * No permission to mess with the config. 955 */ 956 zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg); 957 break; 958 959 case ENODEV: 960 /* 961 * Device doesn't exist. 962 */ 963 zfs_error(dgettext(TEXT_DOMAIN, "%s: %s not in pool"), 964 msg, old_disk); 965 break; 966 967 case ENOTSUP: 968 /* 969 * Can't attach to or replace this type of vdev. 970 */ 971 if (replacing) 972 zfs_error(dgettext(TEXT_DOMAIN, 973 "%s: cannot replace a replacing device"), msg); 974 else 975 zfs_error(dgettext(TEXT_DOMAIN, 976 "%s: attach is only applicable to mirrors"), msg); 977 break; 978 979 case EINVAL: 980 /* 981 * The new device must be a single disk. 982 */ 983 zfs_error(dgettext(TEXT_DOMAIN, 984 "%s: <new_device> must be a single disk"), msg); 985 break; 986 987 case ENXIO: 988 /* 989 * This is unlikely to happen since we've verified that 990 * all the devices can be opened from userland, but it's 991 * still possible in some circumstances. 992 */ 993 zfs_error(dgettext(TEXT_DOMAIN, "%s: %s is unavailable"), 994 msg, new_disk); 995 break; 996 997 case EBUSY: 998 /* 999 * The new device is is use. 1000 */ 1001 zfs_error(dgettext(TEXT_DOMAIN, "%s: %s busy"), msg, new_disk); 1002 break; 1003 1004 case EOVERFLOW: 1005 /* 1006 * The new device is too small. 1007 */ 1008 zfs_error(dgettext(TEXT_DOMAIN, "%s: %s is too small"), 1009 msg, new_disk); 1010 break; 1011 1012 case EDOM: 1013 /* 1014 * The new device has a different alignment requirement. 1015 */ 1016 zfs_error(dgettext(TEXT_DOMAIN, 1017 "%s: devices have different sector alignment"), msg); 1018 break; 1019 1020 case ENAMETOOLONG: 1021 /* 1022 * The resulting top-level vdev spec won't fit in the label. 1023 */ 1024 zfs_error(dgettext(TEXT_DOMAIN, 1025 "%s: too many devices in a single vdev"), msg); 1026 break; 1027 1028 default: 1029 zfs_baderror(errno); 1030 } 1031 1032 return (1); 1033 } 1034 1035 /* 1036 * Detach the specified device. 1037 */ 1038 int 1039 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 1040 { 1041 zfs_cmd_t zc = { 0 }; 1042 char msg[1024]; 1043 1044 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1045 (void) snprintf(zc.zc_prop_value, sizeof (zc.zc_prop_value), 1046 "%s%s", path[0] == '/' ? "" : "/dev/dsk/", path); 1047 1048 if (ioctl(zfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0) 1049 return (0); 1050 1051 (void) snprintf(msg, sizeof (msg), 1052 dgettext(TEXT_DOMAIN, "cannot detach %s"), zc.zc_prop_value); 1053 1054 switch (errno) { 1055 case EPERM: 1056 /* 1057 * No permission to mess with the config. 1058 */ 1059 zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg); 1060 break; 1061 1062 case ENODEV: 1063 /* 1064 * Device doesn't exist. 
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
        zfs_cmd_t zc = { 0 };
        char msg[1024];

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        (void) snprintf(zc.zc_prop_value, sizeof (zc.zc_prop_value),
            "%s%s", path[0] == '/' ? "" : "/dev/dsk/", path);

        if (ioctl(zfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
                return (0);

        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot detach %s"), zc.zc_prop_value);

        switch (errno) {
        case EPERM:
                /*
                 * No permission to mess with the config.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
                break;

        case ENODEV:
                /*
                 * Device doesn't exist.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
                break;

        case ENOTSUP:
                /*
                 * Can't detach from this type of vdev.
                 */
                zfs_error(dgettext(TEXT_DOMAIN,
                    "%s: only applicable to mirror and replacing vdevs"), msg);
                break;

        case EBUSY:
                /*
                 * There are no other replicas of this device.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: no valid replicas"), msg);
                break;

        default:
                zfs_baderror(errno);
        }

        return (1);
}

static int
do_zvol(zfs_handle_t *zhp, void *data)
{
        int linktype = (int)(uintptr_t)data;
        int ret;

        /*
         * We check for volblocksize instead of ZFS_TYPE_VOLUME so that we
         * correctly handle snapshots of volumes.
         */
        if (zhp->zfs_volblocksize != 0) {
                if (linktype)
                        ret = zvol_create_link(zhp->zfs_name);
                else
                        ret = zvol_remove_link(zhp->zfs_name);
        }

        ret = zfs_iter_children(zhp, do_zvol, data);

        zfs_close(zhp);
        return (ret);
}

/*
 * Iterate over all zvols in the pool and make any necessary minor nodes.
 */
int
zpool_create_zvol_links(zpool_handle_t *zhp)
{
        zfs_handle_t *zfp;
        int ret;

        /*
         * If the pool is unavailable, just return success.
         */
        if ((zfp = make_dataset_handle(zhp->zpool_name)) == NULL)
                return (0);

        ret = zfs_iter_children(zfp, do_zvol, (void *)TRUE);

        zfs_close(zfp);
        return (ret);
}

/*
 * Iterate over all zvols in the pool and remove any minor nodes.
 */
int
zpool_remove_zvol_links(zpool_handle_t *zhp)
{
        zfs_handle_t *zfp;
        int ret;

        /*
         * If the pool is unavailable, just return success.
         */
        if ((zfp = make_dataset_handle(zhp->zpool_name)) == NULL)
                return (0);

        ret = zfs_iter_children(zfp, do_zvol, (void *)FALSE);

        zfs_close(zfp);
        return (ret);
}
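
/*
 * Illustrative sketch (compiled out): the callback contract used by
 * do_zvol() above -- zfs_iter_children() invokes the callback once per
 * child with an open handle, and the callback is responsible for closing
 * it.  The counter example is hypothetical.
 */
#if 0
static int
example_count_volumes(zfs_handle_t *zhp, void *data)
{
        int *count = data;

        if (zhp->zfs_volblocksize != 0)
                (*count)++;

        /* recurse into children, then release the handle we were given */
        (void) zfs_iter_children(zhp, example_count_volumes, data);
        zfs_close(zhp);
        return (0);
}
#endif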