/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"

/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
static int
zpool_name_valid(const char *pool, char *buf, size_t buflen)
{
	namecheck_err_t why;
	char what;

	if (pool_namecheck(pool, &why, &what) != 0) {
		if (buf != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				(void) snprintf(buf, buflen,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				(void) snprintf(buf, buflen,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"), buflen);
				break;

			case NAME_ERR_RESERVED:
				(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
				    "name is reserved\n"
				    "pool name may have been omitted"), buflen);
				break;

			case NAME_ERR_DISKLIKE:
				(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
				    "pool name is reserved\n"
				    "pool name may have been omitted"), buflen);
				break;
			}
		}
		return (FALSE);
	}

	return (TRUE);
}

/*
 * Set the pool-wide health based on the vdev state of the root vdev.
 */
void
set_pool_health(nvlist_t *config)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	char *health;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	switch (vs->vs_state) {

	case VDEV_STATE_CLOSED:
	case VDEV_STATE_CANT_OPEN:
	case VDEV_STATE_OFFLINE:
		health = dgettext(TEXT_DOMAIN, "FAULTED");
		break;

	case VDEV_STATE_DEGRADED:
		health = dgettext(TEXT_DOMAIN, "DEGRADED");
		break;

	case VDEV_STATE_HEALTHY:
		health = dgettext(TEXT_DOMAIN, "ONLINE");
		break;

	default:
		zfs_baderror(vs->vs_state);
	}

	verify(nvlist_add_string(config, ZPOOL_CONFIG_POOL_HEALTH,
	    health) == 0);
}
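/*
 * Illustrative sketch (not part of the original interface): a caller that
 * wants the detailed reason for a rejected name passes a scratch buffer to
 * zpool_name_valid():
 *
 *	char reason[64];
 *
 *	if (!zpool_name_valid(name, reason, sizeof (reason)))
 *		zfs_error(dgettext(TEXT_DOMAIN,
 *		    "cannot create '%s': %s"), name, reason);
 *
 * This is the same pattern zpool_create() uses below.
 */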
/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(const char *pool)
{
	zpool_handle_t *zhp;
	int error;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(pool, NULL, 0)) {
		zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': invalid "
		    "pool name"), pool);
		return (NULL);
	}

	zhp = zfs_malloc(sizeof (zpool_handle_t));

	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if ((error = zpool_refresh_stats(zhp)) != 0) {
		if (error == ENOENT || error == EINVAL) {
			zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': no "
			    "such pool"), pool);
			free(zhp);
			return (NULL);
		} else {
			zhp->zpool_state = POOL_STATE_UNAVAIL;
		}
	} else {
		zhp->zpool_state = POOL_STATE_ACTIVE;
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
zpool_handle_t *
zpool_open_silent(const char *pool)
{
	zpool_handle_t *zhp;
	int error;

	zhp = zfs_malloc(sizeof (zpool_handle_t));

	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if ((error = zpool_refresh_stats(zhp)) != 0) {
		if (error == ENOENT || error == EINVAL) {
			free(zhp);
			return (NULL);
		} else {
			zhp->zpool_state = POOL_STATE_UNAVAIL;
		}
	} else {
		zhp->zpool_state = POOL_STATE_ACTIVE;
	}

	return (zhp);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': pool is "
		    "currently unavailable\n"), zhp->zpool_name);
		zfs_error(dgettext(TEXT_DOMAIN, "run 'zpool status -v %s' for "
		    "detailed information\n"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the GUID of the pool.
 */
uint64_t
zpool_get_guid(zpool_handle_t *zhp)
{
	uint64_t guid;

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &guid) == 0);
	return (guid);
}

/*
 * Return the amount of space currently consumed by the pool.
 */
uint64_t
zpool_get_space_used(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_alloc);
}

/*
 * Return the total space in the pool.
 */
uint64_t
zpool_get_space_total(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_space);
}
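/*
 * Illustrative usage (assumed consumer code): the two space accessors above
 * are typically combined to report capacity, e.g.:
 *
 *	uint64_t used = zpool_get_space_used(zhp);
 *	uint64_t total = zpool_get_space_total(zhp);
 *	int capacity = (int)((used * 100) / total);
 *
 * Both values come from the same vdev_stat_t, so total >= used always holds.
 */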
/*
 * Return the alternate root for this pool, if any.
 */
int
zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
{
	zfs_cmd_t zc = { 0 };

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (ioctl(zfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
	    zc.zc_objset_stats.dds_altroot[0] == '\0')
		return (-1);

	(void) strlcpy(buf, zc.zc_objset_stats.dds_altroot, buflen);

	return (0);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
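/*
 * Illustrative usage (assumed consumer code): callers typically gate
 * operations on the handle state rather than re-reading the config:
 *
 *	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL)
 *		return (-1);
 */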
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(const char *pool, nvlist_t *nvroot, const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char *packed;
	size_t len;
	int err;
	char reason[64];

	if (!zpool_name_valid(pool, reason, sizeof (reason))) {
		zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': %s"),
		    pool, reason);
		return (-1);
	}

	if (altroot != NULL && altroot[0] != '/') {
		zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': alternate "
		    "root '%s' must be a complete path"), pool, altroot);
		return (-1);
	}

	if ((err = nvlist_size(nvroot, &len, NV_ENCODE_NATIVE)) != 0)
		zfs_baderror(err);

	packed = zfs_malloc(len);

	if ((err = nvlist_pack(nvroot, &packed, &len,
	    NV_ENCODE_NATIVE, 0)) != 0)
		zfs_baderror(err);

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	if (altroot != NULL)
		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));

	if (ioctl(zfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
		switch (errno) {
		case EEXIST:
			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
			    "pool exists"), pool);
			break;

		case EPERM:
			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
			    "permission denied"), pool);
			break;

		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
			    "one or more vdevs refer to the same device"),
			    pool);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error(dgettext(TEXT_DOMAIN, "cannot "
				    "create '%s': one or more devices is less "
				    "than the minimum size (%s)"), pool,
				    buf);
			}
			break;

		case ENAMETOOLONG:
			/*
			 * One of the vdevs has exceeded VDEV_SPEC_MAX length in
			 * its plaintext representation.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
			    "too many devices in a single vdev"), pool);
			break;

		case EIO:
			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
			    "I/O error on one or more devices"), pool);
			break;

		case ENXIO:
			/*
			 * This is unlikely to happen since we've verified that
			 * all the devices can be opened from userland, but it's
			 * still possible in some circumstances.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
			    "one or more devices is unavailable"), pool);
			break;

		case ENOSPC:
			/*
			 * This can occur if we were incapable of writing to a
			 * file vdev because the underlying filesystem is out of
			 * space.  This is very similar to EOVERFLOW, but we'll
			 * produce a slightly different message.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
			    "one or more devices is out of space"), pool);
			break;

		default:
			zfs_baderror(errno);
		}

		free(packed);
		return (-1);
	}

	free(packed);

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (altroot != NULL) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(pool, ZFS_TYPE_ANY)) != NULL);
		verify(zfs_prop_set(zhp, ZFS_PROP_MOUNTPOINT, "/") == 0);

		zfs_close(zhp);
	}

	return (0);
}

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (ioctl(zfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot destroy '%s': permission denied"),
			    zhp->zpool_name);
			break;

		case EBUSY:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot destroy '%s': pool busy"),
			    zhp->zpool_name);
			break;

		case ENOENT:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot destroy '%s': no such pool"),
			    zhp->zpool_name);
			break;

		case EROFS:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot destroy '%s': one or more devices is "
			    "read only, or '/' is mounted read only"),
			    zhp->zpool_name);
			break;

		default:
			zfs_baderror(errno);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
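/*
 * Illustrative destroy sequence (a sketch, not from the original source;
 * 'tank' is a hypothetical pool name).  Destroying does not free the handle,
 * so the caller still closes it:
 *
 *	zpool_handle_t *zhp;
 *	int ret;
 *
 *	if ((zhp = zpool_open_canfail("tank")) == NULL)
 *		return (-1);
 *	ret = zpool_destroy(zhp);
 *	zpool_close(zhp);
 */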
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	char *packed;
	size_t len;
	zfs_cmd_t zc = { 0 };

	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

	packed = zfs_malloc(len);

	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	if (ioctl(zfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EPERM:
			zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
			    "permission denied"), zhp->zpool_name);
			break;

		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
			    "one or more vdevs refer to the same device"),
			    zhp->zpool_name);
			break;

		case ENAMETOOLONG:
			/*
			 * One of the vdevs has exceeded VDEV_SPEC_MAX length in
			 * its plaintext representation.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
			    "too many devices in a single vdev"),
			    zhp->zpool_name);
			break;

		case ENXIO:
			/*
			 * This is unlikely to happen since we've verified that
			 * all the devices can be opened from userland, but it's
			 * still possible in some circumstances.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
			    "one or more devices is unavailable"),
			    zhp->zpool_name);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error(dgettext(TEXT_DOMAIN, "cannot "
				    "add to '%s': one or more devices is less "
				    "than the minimum size (%s)"),
				    zhp->zpool_name, buf);
			}
			break;

		default:
			zfs_baderror(errno);
		}

		free(packed);
		return (-1);
	}

	free(packed);

	return (0);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (ioctl(zfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EPERM:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot export '%s': permission denied"),
			    zhp->zpool_name);
			break;

		case EBUSY:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot export '%s': pool is in use"),
			    zhp->zpool_name);
			break;

		case ENOENT:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot export '%s': no such pool"),
			    zhp->zpool_name);
			break;

		default:
			zfs_baderror(errno);
		}

		return (-1);
	}

	return (0);
}
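/*
 * Illustrative usage (assumed consumer code): zpool_export() does not free
 * the handle, so a caller still closes it afterwards:
 *
 *	if (zpool_export(zhp) != 0)
 *		return (-1);
 *	zpool_close(zhp);
 */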
/*
 * Import the given pool using the known configuration.  The configuration
 * should have come from zpool_find_import().  The 'newname' and 'altroot'
 * parameters control whether the pool is imported with a different name or
 * with an alternate root, respectively.
 */
int
zpool_import(nvlist_t *config, const char *newname, const char *altroot)
{
	zfs_cmd_t zc;
	char *packed;
	size_t len;
	char *thename;
	char *origname;
	int ret;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	if (newname != NULL) {
		if (!zpool_name_valid(newname, NULL, 0)) {
			zfs_error(dgettext(TEXT_DOMAIN, "cannot import '%s': "
			    "invalid pool name"), newname);
			return (-1);
		}
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (altroot != NULL && altroot[0] != '/') {
		zfs_error(dgettext(TEXT_DOMAIN, "cannot import '%s': alternate "
		    "root '%s' must be a complete path"), thename,
		    altroot);
		return (-1);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	if (altroot != NULL)
		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
	else
		zc.zc_root[0] = '\0';

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_pool_guid) == 0);

	verify(nvlist_size(config, &len, NV_ENCODE_NATIVE) == 0);

	packed = zfs_malloc(len);

	verify(nvlist_pack(config, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	ret = 0;
	if (ioctl(zfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];
		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case EEXIST:
			/*
			 * A pool with that name already exists.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "%s: pool exists"),
			    desc);
			break;

		case EPERM:
			/*
			 * The user doesn't have permission to create pools.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "%s: permission "
			    "denied"), desc);
			break;

		case ENXIO:
		case EDOM:
			/*
			 * Device is unavailable, or vdev sum didn't match.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "%s: one or more "
			    "devices is unavailable"),
			    desc);
			break;

		default:
			zfs_baderror(errno);
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;
		/*
		 * This should never fail, but play it safe anyway.
		 */
		if ((zhp = zpool_open_silent(thename)) != NULL) {
			ret = zpool_create_zvol_links(zhp);
			zpool_close(zhp);
		}
	}

	free(packed);
	return (ret);
}
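/*
 * Illustrative usage (a sketch; 'config' is assumed to come from
 * zpool_find_import() as described above): import under a new name with an
 * alternate root:
 *
 *	if (zpool_import(config, "newname", "/mnt") != 0)
 *		return (-1);
 */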
/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (ioctl(zfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

	switch (errno) {
	case EPERM:
		/*
		 * No permission to scrub this pool.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
		break;

	case EBUSY:
		/*
		 * Resilver in progress.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: currently resilvering"),
		    msg);
		break;

	default:
		zfs_baderror(errno);
	}
	return (-1);
}

/*
 * Bring the specified vdev online.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) snprintf(zc.zc_prop_value, sizeof (zc.zc_prop_value),
	    "%s%s", path[0] == '/' ? "" : "/dev/dsk/", path);

	if (ioctl(zfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot online %s"), zc.zc_prop_value);

	switch (errno) {
	case ENODEV:
		/*
		 * Device doesn't exist.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
		break;

	case EPERM:
		/*
		 * No permission to bring this vdev online.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
		break;

	default:
		zfs_baderror(errno);
	}
	return (-1);
}

/*
 * Take the specified vdev offline.
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) snprintf(zc.zc_prop_value, sizeof (zc.zc_prop_value),
	    "%s%s", path[0] == '/' ? "" : "/dev/dsk/", path);

	zc.zc_cookie = istmp;

	if (ioctl(zfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), zc.zc_prop_value);

	switch (errno) {
	case ENODEV:
		/*
		 * Device doesn't exist.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
		break;

	case EPERM:
		/*
		 * No permission to take this vdev offline.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: no valid replicas"), msg);
		break;

	default:
		zfs_baderror(errno);
	}
	return (-1);
}
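/*
 * Illustrative usage: both online and offline accept either a full path or a
 * name relative to /dev/dsk, so these two calls are equivalent ('istmp' of
 * FALSE makes the offline persistent rather than temporary):
 *
 *	(void) zpool_vdev_offline(zhp, "c0t0d0s0", FALSE);
 *	(void) zpool_vdev_offline(zhp, "/dev/dsk/c0t0d0s0", FALSE);
 */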
"" : "/dev/dsk/", old_disk); 925 zc.zc_cookie = replacing; 926 927 verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0); 928 929 packed = zfs_malloc(len); 930 931 verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0); 932 933 zc.zc_config_src = (uint64_t)(uintptr_t)packed; 934 zc.zc_config_src_size = len; 935 936 ret = ioctl(zfs_fd, ZFS_IOC_VDEV_ATTACH, &zc); 937 938 free(packed); 939 940 if (ret == 0) 941 return (0); 942 943 if (replacing) 944 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 945 "cannot replace %s with %s"), old_disk, new_disk); 946 else 947 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 948 "cannot attach %s to %s"), new_disk, old_disk); 949 950 switch (errno) { 951 case EPERM: 952 /* 953 * No permission to mess with the config. 954 */ 955 zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg); 956 break; 957 958 case ENODEV: 959 /* 960 * Device doesn't exist. 961 */ 962 zfs_error(dgettext(TEXT_DOMAIN, "%s: %s not in pool"), 963 msg, old_disk); 964 break; 965 966 case ENOTSUP: 967 /* 968 * Can't attach to or replace this type of vdev. 969 */ 970 if (replacing) 971 zfs_error(dgettext(TEXT_DOMAIN, 972 "%s: cannot replace a replacing device"), msg); 973 else 974 zfs_error(dgettext(TEXT_DOMAIN, 975 "%s: attach is only applicable to mirrors"), msg); 976 break; 977 978 case EINVAL: 979 /* 980 * The new device must be a single disk. 981 */ 982 zfs_error(dgettext(TEXT_DOMAIN, 983 "%s: <new_device> must be a single disk"), msg); 984 break; 985 986 case ENXIO: 987 /* 988 * This is unlikely to happen since we've verified that 989 * all the devices can be opened from userland, but it's 990 * still possible in some circumstances. 991 */ 992 zfs_error(dgettext(TEXT_DOMAIN, "%s: %s is unavailable"), 993 msg, new_disk); 994 break; 995 996 case EBUSY: 997 /* 998 * The new device is is use. 999 */ 1000 zfs_error(dgettext(TEXT_DOMAIN, "%s: %s busy"), msg, new_disk); 1001 break; 1002 1003 case EOVERFLOW: 1004 /* 1005 * The new device is too small. 1006 */ 1007 zfs_error(dgettext(TEXT_DOMAIN, "%s: %s is too small"), 1008 msg, new_disk); 1009 break; 1010 1011 case EDOM: 1012 /* 1013 * The new device has a different alignment requirement. 1014 */ 1015 zfs_error(dgettext(TEXT_DOMAIN, 1016 "%s: devices have different sector alignment"), msg); 1017 break; 1018 1019 case ENAMETOOLONG: 1020 /* 1021 * The resulting top-level vdev spec won't fit in the label. 1022 */ 1023 zfs_error(dgettext(TEXT_DOMAIN, 1024 "%s: too many devices in a single vdev"), msg); 1025 break; 1026 1027 default: 1028 zfs_baderror(errno); 1029 } 1030 1031 return (1); 1032 } 1033 1034 /* 1035 * Detach the specified device. 1036 */ 1037 int 1038 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 1039 { 1040 zfs_cmd_t zc = { 0 }; 1041 char msg[1024]; 1042 1043 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1044 (void) snprintf(zc.zc_prop_value, sizeof (zc.zc_prop_value), 1045 "%s%s", path[0] == '/' ? "" : "/dev/dsk/", path); 1046 1047 if (ioctl(zfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0) 1048 return (0); 1049 1050 (void) snprintf(msg, sizeof (msg), 1051 dgettext(TEXT_DOMAIN, "cannot detach %s"), zc.zc_prop_value); 1052 1053 switch (errno) { 1054 case EPERM: 1055 /* 1056 * No permission to mess with the config. 1057 */ 1058 zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg); 1059 break; 1060 1061 case ENODEV: 1062 /* 1063 * Device doesn't exist. 
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) snprintf(zc.zc_prop_value, sizeof (zc.zc_prop_value),
	    "%s%s", path[0] == '/' ? "" : "/dev/dsk/", path);

	if (ioctl(zfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), zc.zc_prop_value);

	switch (errno) {
	case EPERM:
		/*
		 * No permission to mess with the config.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
		break;

	case ENODEV:
		/*
		 * Device doesn't exist.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
		break;

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error(dgettext(TEXT_DOMAIN,
		    "%s: only applicable to mirror and replacing vdevs"), msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: no valid replicas"), msg);
		break;

	default:
		zfs_baderror(errno);
	}

	return (1);
}

static int
do_zvol(zfs_handle_t *zhp, void *data)
{
	int linktype = (int)(uintptr_t)data;
	int ret;

	/*
	 * We check for volblocksize instead of ZFS_TYPE_VOLUME so that we
	 * correctly handle snapshots of volumes.
	 */
	if (zhp->zfs_volblocksize != 0) {
		if (linktype)
			ret = zvol_create_link(zhp->zfs_name);
		else
			ret = zvol_remove_link(zhp->zfs_name);

		/*
		 * Don't descend into children if we failed to create or
		 * remove the link for this volume.
		 */
		if (ret != 0) {
			zfs_close(zhp);
			return (ret);
		}
	}

	ret = zfs_iter_children(zhp, do_zvol, data);

	zfs_close(zhp);
	return (ret);
}

/*
 * Iterate over all zvols in the pool and make any necessary minor nodes.
 */
int
zpool_create_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_children(zfp, do_zvol, (void *)TRUE);

	zfs_close(zfp);
	return (ret);
}

/*
 * Iterate over all zvols in the pool and remove any minor nodes.
 */
int
zpool_remove_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_children(zfp, do_zvol, (void *)FALSE);

	zfs_close(zfp);
	return (ret);
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	path = zfs_strdup(list[0].devname);
	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
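/*
 * Illustrative round trip (a sketch): the two helpers above invert one
 * another for a healthy device:
 *
 *	char *devid = path_to_devid("/dev/dsk/c0t0d0s0");
 *
 *	if (devid != NULL) {
 *		char *path = devid_to_path(devid);
 *		if (path != NULL)
 *			free(path);
 *		devid_str_free(devid);
 *	}
 *
 * Note the different deallocators: the path is zfs_strdup()ed and freed with
 * free(), while the devid string comes from devid_str_encode() and must go
 * back through devid_str_free().
 */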
/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.
 * We ignore any failure here, since a common case is for an unprivileged user
 * to type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_prop_value, path, sizeof (zc.zc_prop_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_pool_guid) == 0);

	(void) ioctl(zfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).
 * If the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the
 * vdev.  If 'zhp' is NULL, then this is an exported pool, and we don't need
 * to do any of these checks.
 */
char *
zpool_vdev_name(zpool_handle_t *zhp, nvlist_t *nv)
{
	char *path, *devid;
	uint64_t wholedisk;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		if (zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					verify(nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0);
					free(newpath);
					verify(nvlist_lookup_string(nv,
					    ZPOOL_CONFIG_PATH, &path) == 0);
				}

				if (newdevid)
					devid_str_free(newdevid);
			}

		}

		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) == 0 && wholedisk) {
			char *tmp = zfs_strdup(path);
			tmp[strlen(path) - 2] = '\0';
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
	}

	return (zfs_strdup(path));
}
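/*
 * Illustrative usage (assumed consumer code): every return path above hands
 * back zfs_strdup()ed memory, so the caller must free the name:
 *
 *	char *name = zpool_vdev_name(zhp, nv);
 *	(void) printf("\t%s\n", name);
 *	free(name);
 */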