/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"

/*
 * Validate the given pool name, optionally setting an extended error message
 * on 'hdl'.
 */
static boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0)) {
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
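/*
 * For illustration (not part of the library): with isopen == B_FALSE, the
 * strncmp() checks above reject any name that merely begins with a reserved
 * prefix, while isopen == B_TRUE lets pre-existing pools through:
 *
 *	zpool_name_valid(hdl, B_FALSE, "raidz1")	returns B_FALSE
 *	zpool_name_valid(hdl, B_FALSE, "spare0")	returns B_FALSE
 *	zpool_name_valid(hdl, B_TRUE, "raidz1")		returns B_TRUE
 *	zpool_name_valid(hdl, B_FALSE, "tank")		returns B_TRUE
 */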
/*
 * Set the pool-wide health based on the vdev state of the root vdev.
 */
int
set_pool_health(nvlist_t *config)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	char *health;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	switch (vs->vs_state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_CANT_OPEN:
	case VDEV_STATE_OFFLINE:
		health = dgettext(TEXT_DOMAIN, "FAULTED");
		break;

	case VDEV_STATE_DEGRADED:
		health = dgettext(TEXT_DOMAIN, "DEGRADED");
		break;

	case VDEV_STATE_HEALTHY:
		health = dgettext(TEXT_DOMAIN, "ONLINE");
		break;

	default:
		abort();
	}

	return (nvlist_add_string(config, ZPOOL_CONFIG_POOL_HEALTH, health));
}

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp) != 0) {
		if (errno == ENOENT || errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "no such pool"));
			(void) zfs_error(hdl, EZFS_NOENT,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    pool);
			free(zhp);
			return (NULL);
		} else {
			zhp->zpool_state = POOL_STATE_UNAVAIL;
		}
	} else {
		zhp->zpool_state = POOL_STATE_ACTIVE;
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
zpool_handle_t *
zpool_open_silent(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = calloc(1, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp) != 0) {
		if (errno == ENOENT || errno == EINVAL) {
			free(zhp);
			return (NULL);
		} else {
			zhp->zpool_state = POOL_STATE_UNAVAIL;
		}
	} else {
		zhp->zpool_state = POOL_STATE_ACTIVE;
	}

	return (zhp);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
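/*
 * Typical consumer usage of the three open routines above (an illustrative
 * sketch; 'g_zfs' stands in for a libzfs_handle_t from libzfs_init()):
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(g_zfs, "tank")) == NULL)
 *		return (1);	(error already reported through the handle)
 *
 *	(void) printf("pool: %s\n", zpool_get_name(zhp));
 *	zpool_close(zhp);
 *
 * zpool_open_canfail() is the variant for callers such as 'zpool status'
 * that must examine FAULTED pools; zpool_open_silent() is for iteration.
 */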
/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_error_log) {
		int i;
		for (i = 0; i < zhp->zpool_error_count; i++)
			nvlist_free(zhp->zpool_error_log[i]);
		free(zhp->zpool_error_log);
	}
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the GUID of the pool.
 */
uint64_t
zpool_get_guid(zpool_handle_t *zhp)
{
	uint64_t guid;

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &guid) == 0);
	return (guid);
}

/*
 * Return the version of the pool.
 */
uint64_t
zpool_get_version(zpool_handle_t *zhp)
{
	uint64_t version;

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);

	return (version);
}

/*
 * Return the amount of space currently consumed by the pool.
 */
uint64_t
zpool_get_space_used(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_alloc);
}

/*
 * Return the total space in the pool.
 */
uint64_t
zpool_get_space_total(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_space);
}

/*
 * Return the alternate root for this pool, if any.
 */
int
zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
{
	zfs_cmd_t zc = { 0 };

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
	    zc.zc_root[0] == '\0')
		return (-1);

	(void) strlcpy(buf, zc.zc_root, buflen);

	return (0);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
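/*
 * The two space accessors above read from the same vdev_stat_t, so they are
 * mutually consistent as of the last zpool_refresh_stats().  A minimal
 * sketch of deriving a capacity percentage with integer arithmetic, similar
 * to what 'zpool list' prints:
 *
 *	uint64_t used = zpool_get_space_used(zhp);
 *	uint64_t total = zpool_get_space_total(zhp);
 *	uint64_t capacity = (used * 100) / total;
 */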
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char *packed;
	size_t len;
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (altroot != NULL && altroot[0] != '/')
		return (zfs_error(hdl, EZFS_BADPATH,
		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
		    altroot));

	if (nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) != 0)
		return (no_memory(hdl));

	if ((packed = zfs_alloc(hdl, len)) == NULL)
		return (-1);

	if (nvlist_pack(nvroot, &packed, &len,
	    NV_ENCODE_NATIVE, 0) != 0) {
		free(packed);
		return (no_memory(hdl));
	}

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	if (altroot != NULL)
		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
		free(packed);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect
			 * this until we try to add it and see we already
			 * have a label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect
			 * which device was the problem device since there's
			 * no reliable way to determine device size from
			 * userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	free(packed);

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (altroot != NULL) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
		verify(zfs_prop_set(zhp, ZFS_PROP_MOUNTPOINT, "/") == 0);

		zfs_close(zhp);
	}

	return (0);
}
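/*
 * An illustrative sketch of building 'nvroot' for a single-disk pool (the
 * zpool command normally constructs this; error handling elided here):
 *
 *	nvlist_t *root, *disk;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c1t0d0s0") == 0);
 *	verify(nvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 1ULL) == 0);
 *
 *	verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	ret = zpool_create(hdl, "tank", root, NULL);
 */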
/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	if (zpool_remove_zvol_links(zhp) != 0) {
		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed
 * the necessary verification to ensure that the vdev specification is
 * well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	char *packed;
	size_t len;
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares;
	uint_t nspares;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

	if ((packed = zfs_alloc(zhp->zpool_hdl, len)) == NULL)
		return (-1);

	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect
			 * this until we try to add it and see we already
			 * have a label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect
			 * which device was the problem device since there's
			 * no reliable way to determine device size from
			 * userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add raidz2 vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	free(packed);

	return (ret);
}
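/*
 * Both zpool_create() and zpool_add() hand the vdev tree to the kernel the
 * same way: size the nvlist, pack it into a contiguous buffer, and pass the
 * buffer through zc_config_src.  A sketch of the shared pattern as a
 * hypothetical helper (the library open-codes it at each call site):
 *
 *	static int
 *	pack_config(libzfs_handle_t *hdl, nvlist_t *nv, zfs_cmd_t *zc)
 *	{
 *		char *packed;
 *		size_t len;
 *
 *		if (nvlist_size(nv, &len, NV_ENCODE_NATIVE) != 0)
 *			return (no_memory(hdl));
 *		if ((packed = zfs_alloc(hdl, len)) == NULL)
 *			return (-1);
 *		verify(nvlist_pack(nv, &packed, &len,
 *		    NV_ENCODE_NATIVE, 0) == 0);
 *		zc->zc_config_src = (uint64_t)(uintptr_t)packed;
 *		zc->zc_config_src_size = len;
 *		return (0);
 *	}
 *
 * The caller must free() the packed buffer once the ioctl() returns.
 */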
/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
		return (zpool_standard_error(zhp->zpool_hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
		    zhp->zpool_name));

	return (0);
}

/*
 * Import the given pool using the known configuration.  The configuration
 * should have come from zpool_find_import().  The 'newname' and 'altroot'
 * parameters control whether the pool is imported with a different name or
 * with an alternate root, respectively.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char *packed;
	size_t len;
	char *thename;
	char *origname;
	int ret;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (altroot != NULL && altroot[0] != '/')
		return (zfs_error(hdl, EZFS_BADPATH,
		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
		    altroot));

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	if (altroot != NULL)
		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
	else
		zc.zc_root[0] = '\0';

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	verify(nvlist_size(config, &len, NV_ENCODE_NATIVE) == 0);

	if ((packed = zfs_alloc(hdl, len)) == NULL)
		return (-1);

	verify(nvlist_pack(config, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	ret = 0;
	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];
		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, desc);
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if ((zhp = zpool_open_silent(hdl, thename)) != NULL) {
			ret = zpool_create_zvol_links(zhp);
			zpool_close(zhp);
		}
	}

	free(packed);
	return (ret);
}
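/*
 * An illustrative sketch of the import flow: zpool_find_import() (declared
 * in libzfs.h; the argument list here is an assumption of this example)
 * returns an nvlist of candidate configurations keyed by pool name, each of
 * which can be handed to zpool_import():
 *
 *	nvlist_t *pools, *config;
 *	nvpair_t *elem = NULL;
 *
 *	if ((pools = zpool_find_import(hdl, 0, NULL)) == NULL)
 *		return (1);
 *
 *	while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
 *		verify(nvpair_value_nvlist(elem, &config) == 0);
 *		(void) zpool_import(hdl, config, NULL, NULL);
 *	}
 *	nvlist_free(pools);
 */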
/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

	if (errno == EBUSY)
		return (zfs_error(hdl, EZFS_RESILVERING, msg));
	else
		return (zpool_standard_error(hdl, errno, msg));
}

static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
    boolean_t *isspare)
{
	uint_t c, children;
	nvlist_t **child;
	uint64_t theguid, present;
	char *path;
	uint64_t wholedisk = 0;
	nvlist_t *ret;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);

	if (search == NULL &&
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &present) == 0) {
		/*
		 * If the device has never been present since import, the only
		 * reliable way to match the vdev is by GUID.
		 */
		if (theguid == guid)
			return (nv);
	} else if (search != NULL &&
	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (wholedisk) {
			/*
			 * For whole disks, the internal path has 's0', but the
			 * path passed in by the user doesn't.
			 */
			if (strlen(search) == strlen(path) - 2 &&
			    strncmp(search, path, strlen(search)) == 0)
				return (nv);
		} else if (strcmp(search, path) == 0) {
			return (nv);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++)
		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
		    isspare)) != NULL)
			return (ret);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    isspare)) != NULL) {
				*isspare = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *isspare)
{
	char buf[MAXPATHLEN];
	const char *search;
	char *end;
	nvlist_t *nvroot;
	uint64_t guid;

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		search = NULL;
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		search = buf;
	} else {
		search = path;
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*isspare = B_FALSE;
	return (vdev_to_nvlist_iter(nvroot, search, guid, isspare));
}
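/*
 * Per the code above, zpool_find_vdev() accepts three spellings of a device
 * (examples are illustrative):
 *
 *	zpool_find_vdev(zhp, "c1t0d0", &isspare)	shorthand; the leading
 *		"/dev/dsk/" is prepended automatically
 *	zpool_find_vdev(zhp, "/dev/dsk/c1t0d0", &isspare)	full path
 *	zpool_find_vdev(zhp, "9163829036279138529", &isspare)	vdev GUID,
 *		the only reliable key when the device was never present
 */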
/*
 * Bring the specified vdev online.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t isspare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (isspare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Take the specified vdev offline.
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t isspare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (isspare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	zc.zc_cookie = istmp;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
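/*
 * A short usage sketch for the two state transitions above (illustrative):
 * 'istmp' requests a temporary offline, i.e. one that does not persist
 * across a reboot, which suits briefly servicing a disk:
 *
 *	if (zpool_vdev_offline(zhp, "c1t0d0", B_TRUE) == 0) {
 *		... service the device ...
 *		(void) zpool_vdev_online(zhp, "c1t0d0");
 *	}
 */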
/*
 * Return B_TRUE if the given vdev sits below a "spare" vdev at position
 * 'which' (0 for the original device, 1 for the hot spare that was swapped
 * in for it).
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot,
    int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	char *packed;
	int ret;
	size_t len;
	nvlist_t *tgt;
	boolean_t isspare;
	uint64_t val;
	char *path;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &isspare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (isspare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    (zpool_find_vdev(zhp, path, &isspare) == NULL || !isspare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
	 * already spared device.
	 */
	if (replacing &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    zpool_find_vdev(zhp, path, &isspare) != NULL && isspare &&
	    is_replacing_spare(config_root, tgt, 0)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device has already been replaced with a spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

	if ((packed = zfs_alloc(zhp->zpool_hdl, len)) == NULL)
		return (-1);

	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);

	free(packed);

	if (ret == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing)
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot replace a replacing device"));
		else
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
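/*
 * Illustrative calls to zpool_vdev_attach() above; 'nvroot' describes the
 * single new disk and is built the same way as for zpool_create():
 *
 *	replace c1t0d0 with c2t0d0 (the 'zpool replace' path):
 *		ret = zpool_vdev_attach(zhp, "c1t0d0", "c2t0d0", nvroot, 1);
 *
 *	mirror c2t0d0 onto c1t0d0 (the 'zpool attach' path):
 *		ret = zpool_vdev_attach(zhp, "c1t0d0", "c2t0d0", nvroot, 0);
 */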
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t isspare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (isspare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Remove the given device.  Currently, this is supported only for hot spares.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t isspare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (!isspare) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only hot spares can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Clear the errors for the pool, or for a particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t isspare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		if (isspare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
static int
do_zvol(zfs_handle_t *zhp, void *data)
{
	int linktype = (int)(uintptr_t)data;
	int ret;

	/*
	 * We check for volblocksize instead of ZFS_TYPE_VOLUME so that we
	 * correctly handle snapshots of volumes.
	 */
	if (zhp->zfs_volblocksize != 0) {
		if (linktype)
			ret = zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
		else
			ret = zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name);
		if (ret != 0) {
			zfs_close(zhp);
			return (ret);
		}
	}

	ret = zfs_iter_children(zhp, do_zvol, data);

	zfs_close(zhp);
	return (ret);
}

/*
 * Iterate over all zvols in the pool and make any necessary minor nodes.
 */
int
zpool_create_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
	    zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_children(zfp, do_zvol, (void *)B_TRUE);

	zfs_close(zfp);
	return (ret);
}

/*
 * Iterate over all zvols in the pool and remove any minor nodes.
 */
int
zpool_remove_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
	    zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_children(zfp, do_zvol, (void *)B_FALSE);

	zfs_close(zfp);
	return (ret);
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	path = strdup(list[0].devname);
	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
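/*
 * The two translators above are used as a pair.  An illustrative round trip
 * that checks whether a stored path still names the same physical device
 * (this is the dance zpool_vdev_name() performs below):
 *
 *	char *devid, *newpath;
 *
 *	if ((devid = path_to_devid("/dev/dsk/c1t0d0s0")) != NULL) {
 *		if ((newpath = devid_to_path(devid)) != NULL) {
 *			... newpath may differ after a reconfigure ...
 *			free(newpath);
 *		}
 *		devid_str_free(devid);
 *	}
 */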
/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.
 * We ignore any failure here, since a common case is for an unprivileged
 * user to type 'zpool status', and we'll display the correct information
 * anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_prop_value, path, sizeof (zc.zc_prop_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a
 * path, we use that, stripping off any leading "/dev/dsk/"; if not, we use
 * the type.  We also check if this is a whole disk, in which case we strip
 * off the trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this,
 * we first do a path -> devid translation (which is fast for the common
 * case).  If the devid matches, we're done.  If not, we do a reverse
 * devid -> path translation and issue the appropriate ioctl() to update the
 * path of the vdev.  If 'zhp' is NULL, then this is an exported pool, and we
 * don't need to do any of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		if (zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID,
		    &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			char *tmp = zfs_strdup(hdl, path);
			if (tmp == NULL)
				return (NULL);
			tmp[strlen(path) - 2] = '\0';
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE,
		    &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity
		 * level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}
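/*
 * Examples of the display names produced above (illustrative):
 *
 *	path "/dev/dsk/c1t0d0s0", whole_disk set	->  "c1t0d0"
 *	path "/dev/dsk/c1t0d0s4", no whole_disk		->  "c1t0d0s4"
 *	no path, type "raidz", nparity 1		->  "raidz1"
 *	not present, guid 9163829036279138529		->  "9163829036279138529"
 */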
/*
 * qsort(3C) comparator used to sort and uniquify error-log bookmarks.
 */
static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t ***list, size_t *nelem)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb;
	int i, j;

	if (zhp->zpool_error_log != NULL) {
		*list = zhp->zpool_error_log;
		*nelem = zhp->zpool_error_count;
		return (0);
	}

	/*
	 * Retrieve the raw error list from the kernel.  If the number of
	 * errors has increased, allocate more space and continue until we get
	 * the entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if ((zc.zc_config_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == 0)
		return (-1);
	zc.zc_config_dst_size = count;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_config_dst);
			if (errno == ENOMEM) {
				if ((zc.zc_config_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl,
				    zc.zc_config_dst_size)) == 0)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_config_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_config_dst) +
	    zc.zc_config_dst_size;
	count -= zc.zc_config_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	/*
	 * Count the number of unique elements.
	 */
	j = 0;
	for (i = 0; i < count; i++) {
		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
		    sizeof (zbookmark_t)) == 0)
			continue;
		j++;
	}

	/*
	 * If the user has only requested the number of items, return it now
	 * without bothering with the extra work.
	 */
	if (list == NULL) {
		*nelem = j;
		free((void *)(uintptr_t)zc.zc_config_dst);
		return (0);
	}

	zhp->zpool_error_count = j;

	/*
	 * Allocate an array of nvlists to hold the results.
	 */
	if ((zhp->zpool_error_log = zfs_alloc(zhp->zpool_hdl,
	    j * sizeof (nvlist_t *))) == NULL) {
		free((void *)(uintptr_t)zc.zc_config_dst);
		return (-1);
	}

	/*
	 * Fill in the results with names from the kernel.
	 */
	j = 0;
	for (i = 0; i < count; i++) {
		char buf[64];
		nvlist_t *nv;

		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
		    sizeof (zbookmark_t)) == 0)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		zhp->zpool_error_log[j] = nv;

		zc.zc_bookmark = zb[i];
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_BOOKMARK_NAME,
		    &zc) == 0) {
			if (nvlist_add_string(nv, ZPOOL_ERR_DATASET,
			    zc.zc_prop_name) != 0 ||
			    nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
			    zc.zc_prop_value) != 0 ||
			    nvlist_add_string(nv, ZPOOL_ERR_RANGE,
			    zc.zc_filename) != 0)
				goto nomem;
		} else {
			(void) snprintf(buf, sizeof (buf), "%llx",
			    (u_longlong_t)zb[i].zb_objset);
			if (nvlist_add_string(nv,
			    ZPOOL_ERR_DATASET, buf) != 0)
				goto nomem;
			(void) snprintf(buf, sizeof (buf), "%llx",
			    (u_longlong_t)zb[i].zb_object);
			if (nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
			    buf) != 0)
				goto nomem;
			(void) snprintf(buf, sizeof (buf), "lvl=%u blkid=%llu",
			    (uint_t)zb[i].zb_level,
			    (u_longlong_t)zb[i].zb_blkid);
			if (nvlist_add_string(nv, ZPOOL_ERR_RANGE,
			    buf) != 0)
				goto nomem;
		}

		j++;
	}

	*list = zhp->zpool_error_log;
	*nelem = zhp->zpool_error_count;

	free((void *)(uintptr_t)zc.zc_config_dst);

	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_config_dst);
	for (i = 0; i < zhp->zpool_error_count; i++) {
		if (zhp->zpool_error_log[i] != NULL)
			nvlist_free(zhp->zpool_error_log[i]);
	}
	free(zhp->zpool_error_log);
	zhp->zpool_error_log = NULL;
	return (no_memory(zhp->zpool_hdl));
}

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));

	return (0);
}
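/*
 * A final usage sketch for zpool_get_errlog() above (illustrative): passing
 * a NULL 'list' retrieves only the count, so a caller like 'zpool status'
 * can decide whether to fetch and format the full table:
 *
 *	nvlist_t **log;
 *	size_t nelem;
 *
 *	if (zpool_get_errlog(zhp, NULL, &nelem) == 0 && nelem != 0 &&
 *	    zpool_get_errlog(zhp, &log, &nelem) == 0) {
 *		... read ZPOOL_ERR_DATASET, ZPOOL_ERR_OBJECT and
 *		    ZPOOL_ERR_RANGE from each log[i] ...
 *	}
 *
 * The array is cached on the handle and freed by zpool_close(); the caller
 * must not free it.
 */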