/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio.h>
#include <strings.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"

/*
 * Validate the given pool name, optionally reporting an extended error
 * message through 'hdl'.
 */
static boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0)) {
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
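
/*
 * Illustrative note (not part of the original file): the reserved-prefix
 * check above only applies when 'isopen' is B_FALSE, so a create or import
 * of a new name is rejected while an existing pool with that name can still
 * be opened.  For example:
 *
 *	zpool_name_valid(hdl, B_FALSE, "raidz2")	returns B_FALSE
 *	zpool_name_valid(hdl, B_TRUE, "raidz2")		returns B_TRUE
 */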

/*
 * Set the pool-wide health based on the vdev state of the root vdev.
 */
int
set_pool_health(nvlist_t *config)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	char *health;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	switch (vs->vs_state) {

	case VDEV_STATE_CLOSED:
	case VDEV_STATE_CANT_OPEN:
	case VDEV_STATE_OFFLINE:
		health = dgettext(TEXT_DOMAIN, "FAULTED");
		break;

	case VDEV_STATE_DEGRADED:
		health = dgettext(TEXT_DOMAIN, "DEGRADED");
		break;

	case VDEV_STATE_HEALTHY:
		health = dgettext(TEXT_DOMAIN, "ONLINE");
		break;

	default:
		abort();
	}

	return (nvlist_add_string(config, ZPOOL_CONFIG_POOL_HEALTH, health));
}

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "no such pool"));
		(void) zfs_error(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
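
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * consumer opens a pool by name, queries it, and closes the handle.  'g_zfs'
 * is a hypothetical libzfs_handle_t obtained from libzfs_init().
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(g_zfs, "tank")) == NULL)
 *		return (1);
 *	(void) printf("opened %s\n", zpool_get_name(zhp));
 *	zpool_close(zhp);
 */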

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_error_log) {
		int i;
		for (i = 0; i < zhp->zpool_error_count; i++)
			nvlist_free(zhp->zpool_error_log[i]);
		free(zhp->zpool_error_log);
	}
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the GUID of the pool.
 */
uint64_t
zpool_get_guid(zpool_handle_t *zhp)
{
	uint64_t guid;

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &guid) == 0);
	return (guid);
}

/*
 * Return the version of the pool.
 */
uint64_t
zpool_get_version(zpool_handle_t *zhp)
{
	uint64_t version;

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);

	return (version);
}

/*
 * Return the amount of space currently consumed by the pool.
 */
uint64_t
zpool_get_space_used(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_alloc);
}

/*
 * Return the total space in the pool.
 */
uint64_t
zpool_get_space_total(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_space);
}

/*
 * Return the alternate root for this pool, if any.
 */
int
zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
{
	zfs_cmd_t zc = { 0 };

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
	    zc.zc_value[0] == '\0')
		return (-1);

	(void) strlcpy(buf, zc.zc_value, buflen);

	return (0);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
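
/*
 * Illustrative sketch (not part of the original file): reporting pool
 * capacity with the accessors above.  'zhp' is assumed to come from
 * zpool_open(); zfs_nicenum() is the same libzfs helper used elsewhere in
 * this file.
 *
 *	char used[64], total[64];
 *
 *	zfs_nicenum(zpool_get_space_used(zhp), used, sizeof (used));
 *	zfs_nicenum(zpool_get_space_total(zhp), total, sizeof (total));
 *	(void) printf("%s: %s used of %s\n", zpool_get_name(zhp), used, total);
 */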

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (altroot != NULL && altroot[0] != '/')
		return (zfs_error(hdl, EZFS_BADPATH,
		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));

	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if (altroot != NULL)
		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
		zcmd_free_nvlists(&zc);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	zcmd_free_nvlists(&zc);

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (altroot != NULL) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

	return (0);
}
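
/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * zpool_create().  The nvroot layout shown (a "root" vdev with a single
 * "disk" child) is an assumption for illustration; real consumers such as
 * zpool(1M) build the tree with their own helpers, and 'g_zfs' is a
 * hypothetical libzfs handle.
 *
 *	nvlist_t *root, *child[1];
 *
 *	verify(nvlist_alloc(&child[0], NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(child[0], ZPOOL_CONFIG_TYPE, "disk") == 0);
 *	verify(nvlist_add_string(child[0], ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c0t0d0s0") == 0);
 *
 *	verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, "root") == 0);
 *	verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    child, 1) == 0);
 *
 *	if (zpool_create(g_zfs, "tank", root, NULL) != 0)
 *		(void) fprintf(stderr, "create failed\n");
 *	nvlist_free(child[0]);
 *	nvlist_free(root);
 */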

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares;
	uint_t nspares;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add raidz2 vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
		return (zpool_standard_error(zhp->zpool_hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
		    zhp->zpool_name));

	return (0);
}

/*
 * Import the given pool using the known configuration.  The configuration
 * should have come from zpool_find_import().  The 'newname' and 'altroot'
 * parameters control whether the pool is imported with a different name or
 * with an alternate root, respectively.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char *thename;
	char *origname;
	int ret;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (altroot != NULL && altroot[0] != '/')
		return (zfs_error(hdl, EZFS_BADPATH,
		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
		    altroot));

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	if (altroot != NULL)
		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
	else
		zc.zc_value[0] = '\0';

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_src_nvlist(hdl, &zc, config, NULL) != 0)
		return (-1);

	ret = 0;
	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];
		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, desc);
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;
		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
			ret = -1;
		} else if (zhp != NULL) {
			ret = zpool_create_zvol_links(zhp);
			zpool_close(zhp);
		}
	}

	zcmd_free_nvlists(&zc);
	return (ret);
}
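
/*
 * Illustrative sketch (not part of the original file): importing a discovered
 * pool under a new name with an alternate root.  'config' is assumed to be a
 * single pool configuration obtained from zpool_find_import(), as described
 * in the comment above, and 'g_zfs' is a hypothetical libzfs handle.
 *
 *	if (zpool_import(g_zfs, config, "newtank", "/a") != 0)
 *		(void) fprintf(stderr, "import failed\n");
 */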

/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

	if (errno == EBUSY)
		return (zfs_error(hdl, EZFS_RESILVERING, msg));
	else
		return (zpool_standard_error(hdl, errno, msg));
}

/*
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare, but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
    boolean_t *avail_spare)
{
	uint_t c, children;
	nvlist_t **child;
	uint64_t theguid, present;
	char *path;
	uint64_t wholedisk = 0;
	nvlist_t *ret;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);

	if (search == NULL &&
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
		/*
		 * If the device has never been present since import, the only
		 * reliable way to match the vdev is by GUID.
		 */
		if (theguid == guid)
			return (nv);
	} else if (search != NULL &&
	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (wholedisk) {
			/*
			 * For whole disks, the internal path has 's0', but the
			 * path passed in by the user doesn't.
			 */
			if (strlen(search) == strlen(path) - 2 &&
			    strncmp(search, path, strlen(search)) == 0)
				return (nv);
		} else if (strcmp(search, path) == 0) {
			return (nv);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++)
		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
		    avail_spare)) != NULL)
			return (ret);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
{
	char buf[MAXPATHLEN];
	const char *search;
	char *end;
	nvlist_t *nvroot;
	uint64_t guid;

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		search = NULL;
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		search = buf;
	} else {
		search = path;
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
}
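
/*
 * Illustrative sketch (not part of the original file): zpool_find_vdev()
 * accepts a short device name (expanded to /dev/dsk/...), a full path, or a
 * decimal GUID string, as the code above shows.
 *
 *	boolean_t spare;
 *	nvlist_t *tgt;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "c0t0d0", &spare)) == NULL)
 *		(void) fprintf(stderr, "no such device\n");
 *	else if (spare)
 *		(void) fprintf(stderr, "device is an available spare\n");
 */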

/*
 * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
 */
static boolean_t
is_spare(zpool_handle_t *zhp, uint64_t guid)
{
	uint64_t spare_guid;
	nvlist_t *nvroot;
	nvlist_t **spares;
	uint_t nspares;
	int i;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		for (i = 0; i < nspares; i++) {
			verify(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &spare_guid) == 0);
			if (guid == spare_guid)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

/*
 * Bring the specified vdev online
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = istmp;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare;
	uint64_t val;
	char *path;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
	 * already spared device.
	 */
	if (replacing &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
	    is_replacing_spare(config_root, tgt, 0)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device has already been replaced with a spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
		return (-1);

	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing)
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot replace a replacing device"));
		else
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Remove the given device.  Currently, this is supported only for hot spares.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (!avail_spare) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only hot spares can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
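
/*
 * Illustrative sketch (not part of the original file): temporarily taking a
 * device offline for service and bringing it back with the functions above.
 * 'zhp' is assumed to come from zpool_open().
 *
 *	if (zpool_vdev_offline(zhp, "c0t1d0", B_TRUE) != 0)
 *		return (-1);
 *	(service the disk)
 *	if (zpool_vdev_online(zhp, "c0t1d0") != 0)
 *		return (-1);
 */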

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

static int
do_zvol(zfs_handle_t *zhp, void *data)
{
	int linktype = (int)(uintptr_t)data;
	int ret;

	/*
	 * We check for volblocksize instead of ZFS_TYPE_VOLUME so that we
	 * correctly handle snapshots of volumes.
	 */
	if (ZFS_IS_VOLUME(zhp)) {
		if (linktype)
			ret = zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
		else
			ret = zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name);
	}

	ret = zfs_iter_children(zhp, do_zvol, data);

	zfs_close(zhp);
	return (ret);
}

/*
 * Iterate over all zvols in the pool and make any necessary minor nodes.
 */
int
zpool_create_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
	    zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_children(zfp, do_zvol, (void *)B_TRUE);

	zfs_close(zfp);
	return (ret);
}

/*
 * Iterate over all zvols in the pool and remove any minor nodes.
 */
int
zpool_remove_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
	    zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_children(zfp, do_zvol, (void *)B_FALSE);

	zfs_close(zfp);
	return (ret);
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	if ((path = strdup(list[0].devname)) == NULL)
		return (NULL);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.
 * We ignore any failure here, since a common case is for an unprivileged user
 * to type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).
 * If the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the
 * vdev.  If 'zhp' is NULL, then this is an exported pool, and we don't need to
 * do any of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		if (zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			char *tmp = zfs_strdup(hdl, path);
			if (tmp == NULL)
				return (NULL);
			tmp[strlen(path) - 2] = '\0';
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}
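
/*
 * Illustrative examples (not part of the original file) of the names the
 * routine above produces:
 *
 *	/dev/dsk/c0t0d0s0, whole disk		-> "c0t0d0"
 *	/dev/dsk/c0t0d0s3, slice vdev		-> "c0t0d0s3"
 *	raidz vdev with nparity == 2		-> "raidz2"
 *	device never present since import	-> its GUID in decimal
 */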

static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t ***list, size_t *nelem)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	int i, j;

	if (zhp->zpool_error_log != NULL) {
		*list = zhp->zpool_error_log;
		*nelem = zhp->zpool_error_count;
		return (0);
	}

	/*
	 * Retrieve the raw error list from the kernel.  If the number of
	 * errors has increased, allocate more space and continue until we get
	 * the entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl,
				    zc.zc_nvlist_dst_size)) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;
	zc.zc_nvlist_dst = 0ULL;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	/*
	 * Count the number of unique elements
	 */
	j = 0;
	for (i = 0; i < count; i++) {
		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
		    sizeof (zbookmark_t)) == 0)
			continue;
		j++;
	}

	/*
	 * If the user has only requested the number of items, return it now
	 * without bothering with the extra work.
	 */
	if (list == NULL) {
		*nelem = j;
		free((void *)(uintptr_t)zc.zc_nvlist_dst);
		return (0);
	}

	zhp->zpool_error_count = j;

	/*
	 * Allocate an array of nvlists to hold the results
	 */
	if ((zhp->zpool_error_log = zfs_alloc(zhp->zpool_hdl,
	    j * sizeof (nvlist_t *))) == NULL) {
		free((void *)(uintptr_t)zc.zc_nvlist_dst);
		return (-1);
	}

	/*
	 * Fill in the results with names from the kernel.
	 */
	j = 0;
	for (i = 0; i < count; i++) {
		char buf[64];
		nvlist_t *nv;

		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
		    sizeof (zbookmark_t)) == 0)
			continue;

		if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
			goto nomem;

		zc.zc_bookmark = zb[i];
		for (;;) {
			if (ioctl(zhp->zpool_hdl->libzfs_fd,
			    ZFS_IOC_BOOKMARK_NAME, &zc) != 0) {
				if (errno == ENOMEM) {
					if (zcmd_expand_dst_nvlist(hdl, &zc)
					    != 0) {
						zcmd_free_nvlists(&zc);
						goto nomem;
					}

					continue;
				} else {
					if (nvlist_alloc(&nv, NV_UNIQUE_NAME,
					    0) != 0)
						goto nomem;

					zhp->zpool_error_log[j] = nv;
					(void) snprintf(buf, sizeof (buf),
					    "%llx", (longlong_t)
					    zb[i].zb_objset);
					if (nvlist_add_string(nv,
					    ZPOOL_ERR_DATASET, buf) != 0)
						goto nomem;
					(void) snprintf(buf, sizeof (buf),
					    "%llx", (longlong_t)
					    zb[i].zb_object);
					if (nvlist_add_string(nv,
					    ZPOOL_ERR_OBJECT, buf) != 0)
						goto nomem;
					(void) snprintf(buf, sizeof (buf),
					    "lvl=%u blkid=%llu",
					    (int)zb[i].zb_level,
					    (long long)zb[i].zb_blkid);
					if (nvlist_add_string(nv,
					    ZPOOL_ERR_RANGE, buf) != 0)
						goto nomem;
				}
			} else {
				if (zcmd_read_dst_nvlist(hdl, &zc,
				    &zhp->zpool_error_log[j]) != 0) {
					zcmd_free_nvlists(&zc);
					goto nomem;
				}
			}

			break;
		}

		zcmd_free_nvlists(&zc);

		j++;
	}

	*list = zhp->zpool_error_log;
	*nelem = zhp->zpool_error_count;
	free(zb);

	return (0);

nomem:
	free(zb);
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	for (i = 0; i < zhp->zpool_error_count; i++)
		nvlist_free(zhp->zpool_error_log[i]);
	free(zhp->zpool_error_log);
	zhp->zpool_error_log = NULL;
	return (no_memory(zhp->zpool_hdl));
}

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));

	return (0);
}

/*
 * Log command history.
 *
 * 'pool' is B_TRUE if we are logging a command for 'zpool'; B_FALSE
 * otherwise ('zfs').  'pool_create' is B_TRUE if we are logging the creation
 * of the pool; B_FALSE otherwise.  'path' is the pathname containing the
 * poolname.  'argc' and 'argv' are used to construct the command string.
 */
void
zpool_log_history(libzfs_handle_t *hdl, int argc, char **argv, const char *path,
    boolean_t pool, boolean_t pool_create)
{
	char cmd_buf[HIS_MAX_RECORD_LEN];
	char *dspath;
	zfs_cmd_t zc = { 0 };
	int i;

	/* construct the command string */
	(void) strcpy(cmd_buf, pool ? "zpool" : "zfs");
	for (i = 0; i < argc; i++) {
		if (strlen(cmd_buf) + 1 + strlen(argv[i]) > HIS_MAX_RECORD_LEN)
			break;
		(void) strcat(cmd_buf, " ");
		(void) strcat(cmd_buf, argv[i]);
	}

	/* figure out the poolname */
	dspath = strpbrk(path, "/@");
	if (dspath == NULL) {
		(void) strcpy(zc.zc_name, path);
	} else {
		(void) strncpy(zc.zc_name, path, dspath - path);
		zc.zc_name[dspath-path] = '\0';
	}

	zc.zc_history = (uint64_t)(uintptr_t)cmd_buf;
	zc.zc_history_len = strlen(cmd_buf);

	/* overloading zc_history_offset */
	zc.zc_history_offset = pool_create;

	(void) ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_LOG_HISTORY, &zc);
}

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error(hdl, EZFS_PERM, dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		default:
			return (zpool_standard_error(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
static int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			*records = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}

#define	HIS_BUF_LEN	(128*1024)

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
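
/*
 * Illustrative sketch (not part of the original file): dumping the history
 * records returned above.  ZPOOL_HIST_CMD is assumed to be the nvlist key
 * under which each record stores its command string.
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t nrec, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) != 0)
 *		return (-1);
 *	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *	    &records, &nrec) == 0);
 *	for (i = 0; i < nrec; i++)
 *		if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
 *		    &cmd) == 0)
 *			(void) printf("%s\n", cmd);
 *	nvlist_free(nvhis);
 */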