/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"

/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
static int
zpool_name_valid(const char *pool, char *buf, size_t buflen)
{
        namecheck_err_t why;
        char what;

        if (pool_namecheck(pool, &why, &what) != 0) {
                if (buf != NULL) {
                        switch (why) {
                        case NAME_ERR_TOOLONG:
                                (void) snprintf(buf, buflen,
                                    dgettext(TEXT_DOMAIN, "name is too long"));
                                break;

                        case NAME_ERR_INVALCHAR:
                                (void) snprintf(buf, buflen,
                                    dgettext(TEXT_DOMAIN, "invalid character "
                                    "'%c' in pool name"), what);
                                break;

                        case NAME_ERR_NOLETTER:
                                (void) strlcpy(buf, dgettext(TEXT_DOMAIN,
                                    "name must begin with a letter"), buflen);
                                break;

                        case NAME_ERR_RESERVED:
                                (void) strlcpy(buf, dgettext(TEXT_DOMAIN,
                                    "name is reserved\n"
                                    "pool name may have been omitted"), buflen);
                                break;

                        case NAME_ERR_DISKLIKE:
                                (void) strlcpy(buf, dgettext(TEXT_DOMAIN,
                                    "pool name is reserved\n"
                                    "pool name may have been omitted"), buflen);
                                break;
                        }
                }
                return (FALSE);
        }

        return (TRUE);
}

/*
 * Set the pool-wide health based on the vdev state of the root vdev.
 */
void
set_pool_health(nvlist_t *config)
{
        nvlist_t *nvroot;
        vdev_stat_t *vs;
        uint_t vsc;
        char *health;

        verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);
        verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
            (uint64_t **)&vs, &vsc) == 0);

        switch (vs->vs_state) {

        case VDEV_STATE_CLOSED:
        case VDEV_STATE_CANT_OPEN:
        case VDEV_STATE_OFFLINE:
                health = dgettext(TEXT_DOMAIN, "FAULTED");
                break;

        case VDEV_STATE_DEGRADED:
                health = dgettext(TEXT_DOMAIN, "DEGRADED");
                break;

        case VDEV_STATE_HEALTHY:
                health = dgettext(TEXT_DOMAIN, "ONLINE");
                break;

        default:
                zfs_baderror(vs->vs_state);
        }

        verify(nvlist_add_string(config, ZPOOL_CONFIG_POOL_HEALTH,
            health) == 0);
}
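
/*
 * Illustrative sketch (not part of the library interface): how the static
 * helper above is used within this file, mirroring the pattern in
 * zpool_create().  The pool name "tank" is an arbitrary example.
 *
 *        char reason[64];
 *
 *        if (!zpool_name_valid("tank", reason, sizeof (reason)))
 *                (void) fprintf(stderr, "invalid name: %s\n", reason);
 */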
/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(const char *pool)
{
        zpool_handle_t *zhp;
        int error;

        /*
         * Make sure the pool name is valid.
         */
        if (!zpool_name_valid(pool, NULL, 0)) {
                zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': invalid "
                    "pool name"), pool);
                return (NULL);
        }

        zhp = zfs_malloc(sizeof (zpool_handle_t));

        (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

        if ((error = zpool_refresh_stats(zhp)) != 0) {
                if (error == ENOENT || error == EINVAL) {
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': no "
                            "such pool"), pool);
                        free(zhp);
                        return (NULL);
                } else {
                        zhp->zpool_state = POOL_STATE_UNAVAIL;
                }
        } else {
                zhp->zpool_state = POOL_STATE_ACTIVE;
        }

        return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
zpool_handle_t *
zpool_open_silent(const char *pool)
{
        zpool_handle_t *zhp;
        int error;

        zhp = zfs_malloc(sizeof (zpool_handle_t));

        (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

        if ((error = zpool_refresh_stats(zhp)) != 0) {
                if (error == ENOENT || error == EINVAL) {
                        free(zhp);
                        return (NULL);
                } else {
                        zhp->zpool_state = POOL_STATE_UNAVAIL;
                }
        } else {
                zhp->zpool_state = POOL_STATE_ACTIVE;
        }

        return (zhp);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(const char *pool)
{
        zpool_handle_t *zhp;

        if ((zhp = zpool_open_canfail(pool)) == NULL)
                return (NULL);

        if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
                zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': pool is "
                    "currently unavailable"), zhp->zpool_name);
                zfs_error(dgettext(TEXT_DOMAIN, "run 'zpool status %s' for "
                    "detailed information"), zhp->zpool_name);
                zpool_close(zhp);
                return (NULL);
        }

        return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
        if (zhp->zpool_config)
                nvlist_free(zhp->zpool_config);
        if (zhp->zpool_old_config)
                nvlist_free(zhp->zpool_old_config);
        if (zhp->zpool_error_log) {
                int i;
                for (i = 0; i < zhp->zpool_error_count; i++)
                        nvlist_free(zhp->zpool_error_log[i]);
                free(zhp->zpool_error_log);
        }
        free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
        return (zhp->zpool_name);
}

/*
 * Return the GUID of the pool.
 */
uint64_t
zpool_get_guid(zpool_handle_t *zhp)
{
        uint64_t guid;

        verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
            &guid) == 0);
        return (guid);
}

/*
 * Return the amount of space currently consumed by the pool.
 */
uint64_t
zpool_get_space_used(zpool_handle_t *zhp)
{
        nvlist_t *nvroot;
        vdev_stat_t *vs;
        uint_t vsc;

        verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);
        verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
            (uint64_t **)&vs, &vsc) == 0);

        return (vs->vs_alloc);
}
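
/*
 * Sketch of the intended open semantics (hypothetical caller; "tank" is an
 * assumed pool name): zpool_open() refuses FAULTED pools, while
 * zpool_open_canfail() returns a handle whose state can be inspected.
 *
 *        zpool_handle_t *zhp;
 *
 *        if ((zhp = zpool_open_canfail("tank")) != NULL) {
 *                if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL)
 *                        (void) printf("pool is faulted\n");
 *                zpool_close(zhp);
 *        }
 */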
/*
 * Return the total space in the pool.
 */
uint64_t
zpool_get_space_total(zpool_handle_t *zhp)
{
        nvlist_t *nvroot;
        vdev_stat_t *vs;
        uint_t vsc;

        verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);
        verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
            (uint64_t **)&vs, &vsc) == 0);

        return (vs->vs_space);
}

/*
 * Return the alternate root for this pool, if any.
 */
int
zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
{
        zfs_cmd_t zc = { 0 };

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        if (zfs_ioctl(ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
            zc.zc_root[0] == '\0')
                return (-1);

        (void) strlcpy(buf, zc.zc_root, buflen);

        return (0);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
        return (zhp->zpool_state);
}
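
/*
 * Illustrative space query using the accessors above (hypothetical caller;
 * 'zhp' is an open handle obtained from zpool_open()):
 *
 *        (void) printf("%s: %llu of %llu bytes used\n",
 *            zpool_get_name(zhp),
 *            (u_longlong_t)zpool_get_space_used(zhp),
 *            (u_longlong_t)zpool_get_space_total(zhp));
 */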
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(const char *pool, nvlist_t *nvroot, const char *altroot)
{
        zfs_cmd_t zc = { 0 };
        char *packed;
        size_t len;
        int err;
        char reason[64];

        if (!zpool_name_valid(pool, reason, sizeof (reason))) {
                zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': %s"),
                    pool, reason);
                return (-1);
        }

        if (altroot != NULL && altroot[0] != '/') {
                zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': alternate "
                    "root '%s' must be a complete path"), pool, altroot);
                return (-1);
        }

        if ((err = nvlist_size(nvroot, &len, NV_ENCODE_NATIVE)) != 0)
                zfs_baderror(err);

        packed = zfs_malloc(len);

        if ((err = nvlist_pack(nvroot, &packed, &len,
            NV_ENCODE_NATIVE, 0)) != 0)
                zfs_baderror(err);

        (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
        zc.zc_config_src = (uint64_t)(uintptr_t)packed;
        zc.zc_config_src_size = len;

        if (altroot != NULL)
                (void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));

        if (zfs_ioctl(ZFS_IOC_POOL_CREATE, &zc) != 0) {
                switch (errno) {
                case EEXIST:
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
                            "pool exists"), pool);
                        break;

                case EPERM:
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
                            "permission denied"), pool);
                        break;

                case EBUSY:
                        /*
                         * This can happen if the user has specified the same
                         * device multiple times.  We can't reliably detect this
                         * until we try to add it and see we already have a
                         * label.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
                            "one or more vdevs refer to the same device"),
                            pool);
                        break;

                case EOVERFLOW:
                        /*
                         * This occurs when one of the devices is below
                         * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
                         * device was the problem device since there's no
                         * reliable way to determine device size from userland.
                         */
                        {
                                char buf[64];

                                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                                zfs_error(dgettext(TEXT_DOMAIN, "cannot "
                                    "create '%s': one or more devices is less "
                                    "than the minimum size (%s)"), pool,
                                    buf);
                        }
                        break;

                case ENAMETOOLONG:
                        /*
                         * One of the vdevs has exceeded VDEV_SPEC_MAX length in
                         * its plaintext representation.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
                            "too many devices in a single vdev"), pool);
                        break;

                case EIO:
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
                            "I/O error on one or more devices"), pool);
                        break;

                case ENXIO:
                        /*
                         * This is unlikely to happen since we've verified that
                         * all the devices can be opened from userland, but it's
                         * still possible in some circumstances.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
                            "one or more devices is unavailable"), pool);
                        break;

                case ENOSPC:
                        /*
                         * This can occur if we were incapable of writing to a
                         * file vdev because the underlying filesystem is out of
                         * space.  This is very similar to EOVERFLOW, but we'll
                         * produce a slightly different message.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
                            "one or more devices is out of space"), pool);
                        break;

                default:
                        zfs_baderror(errno);
                }

                free(packed);
                return (-1);
        }

        free(packed);

        /*
         * If this is an alternate root pool, then we automatically set the
         * mountpoint of the root dataset to be '/'.
         */
        if (altroot != NULL) {
                zfs_handle_t *zhp;

                verify((zhp = zfs_open(pool, ZFS_TYPE_ANY)) != NULL);
                verify(zfs_prop_set(zhp, ZFS_PROP_MOUNTPOINT, "/") == 0);

                zfs_close(zhp);
        }

        return (0);
}

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
        zfs_cmd_t zc = { 0 };
        zfs_handle_t *zfp = NULL;

        if (zhp->zpool_state == POOL_STATE_ACTIVE &&
            (zfp = zfs_open(zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
                return (-1);

        if (zpool_remove_zvol_links(zhp) != 0)
                return (-1);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (zfs_ioctl(ZFS_IOC_POOL_DESTROY, &zc) != 0) {
                switch (errno) {
                case EPERM:
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "cannot destroy '%s': permission denied"),
                            zhp->zpool_name);
                        break;

                case EBUSY:
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "cannot destroy '%s': pool busy"),
                            zhp->zpool_name);
                        break;

                case ENOENT:
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "cannot destroy '%s': no such pool"),
                            zhp->zpool_name);
                        break;

                case EROFS:
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "cannot destroy '%s': one or more devices is "
                            "read only, or '/' is mounted read only"),
                            zhp->zpool_name);
                        break;

                default:
                        zfs_baderror(errno);
                }

                if (zfp)
                        zfs_close(zfp);
                return (-1);
        }

        if (zfp) {
                remove_mountpoint(zfp);
                zfs_close(zfp);
        }

        return (0);
}
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
        char *packed;
        size_t len;
        zfs_cmd_t zc = { 0 };

        verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

        packed = zfs_malloc(len);

        verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        zc.zc_config_src = (uint64_t)(uintptr_t)packed;
        zc.zc_config_src_size = len;

        if (zfs_ioctl(ZFS_IOC_VDEV_ADD, &zc) != 0) {
                switch (errno) {
                case EPERM:
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
                            "permission denied"), zhp->zpool_name);
                        break;

                case EBUSY:
                        /*
                         * This can happen if the user has specified the same
                         * device multiple times.  We can't reliably detect this
                         * until we try to add it and see we already have a
                         * label.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
                            "one or more vdevs refer to the same device"),
                            zhp->zpool_name);
                        break;

                case ENAMETOOLONG:
                        /*
                         * One of the vdevs has exceeded VDEV_SPEC_MAX length in
                         * its plaintext representation.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
                            "too many devices in a single vdev"),
                            zhp->zpool_name);
                        break;

                case ENXIO:
                        /*
                         * This is unlikely to happen since we've verified that
                         * all the devices can be opened from userland, but it's
                         * still possible in some circumstances.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
                            "one or more devices is unavailable"),
                            zhp->zpool_name);
                        break;

                case EOVERFLOW:
                        /*
                         * This occurs when one of the devices is below
                         * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
                         * device was the problem device since there's no
                         * reliable way to determine device size from userland.
                         */
                        {
                                char buf[64];

                                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                                zfs_error(dgettext(TEXT_DOMAIN, "cannot "
                                    "add to '%s': one or more devices is less "
                                    "than the minimum size (%s)"),
                                    zhp->zpool_name, buf);
                        }
                        break;

                default:
                        zfs_baderror(errno);
                }

                free(packed);
                return (-1);
        }

        free(packed);

        return (0);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export(zpool_handle_t *zhp)
{
        zfs_cmd_t zc = { 0 };

        if (zpool_remove_zvol_links(zhp) != 0)
                return (-1);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (zfs_ioctl(ZFS_IOC_POOL_EXPORT, &zc) != 0) {
                switch (errno) {
                case EPERM:
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "cannot export '%s': permission denied"),
                            zhp->zpool_name);
                        break;

                case EBUSY:
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "cannot export '%s': pool is in use"),
                            zhp->zpool_name);
                        break;

                case ENOENT:
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "cannot export '%s': no such pool"),
                            zhp->zpool_name);
                        break;

                default:
                        zfs_baderror(errno);
                }

                return (-1);
        }

        return (0);
}
/*
 * Import the given pool using the known configuration.  The configuration
 * should have come from zpool_find_import().  The 'newname' and 'altroot'
 * parameters control whether the pool is imported with a different name or
 * with an alternate root, respectively.
 */
int
zpool_import(nvlist_t *config, const char *newname, const char *altroot)
{
        zfs_cmd_t zc = { 0 };
        char *packed;
        size_t len;
        char *thename;
        char *origname;
        int ret;

        verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
            &origname) == 0);

        if (newname != NULL) {
                if (!zpool_name_valid(newname, NULL, 0)) {
                        zfs_error(dgettext(TEXT_DOMAIN, "cannot import '%s': "
                            "invalid pool name"), newname);
                        return (-1);
                }
                thename = (char *)newname;
        } else {
                thename = origname;
        }

        if (altroot != NULL && altroot[0] != '/') {
                zfs_error(dgettext(TEXT_DOMAIN, "cannot import '%s': alternate "
                    "root '%s' must be a complete path"), thename,
                    altroot);
                return (-1);
        }

        (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

        if (altroot != NULL)
                (void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
        else
                zc.zc_root[0] = '\0';

        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
            &zc.zc_guid) == 0);

        verify(nvlist_size(config, &len, NV_ENCODE_NATIVE) == 0);

        packed = zfs_malloc(len);

        verify(nvlist_pack(config, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

        zc.zc_config_src = (uint64_t)(uintptr_t)packed;
        zc.zc_config_src_size = len;

        ret = 0;
        if (zfs_ioctl(ZFS_IOC_POOL_IMPORT, &zc) != 0) {
                char desc[1024];
                if (newname == NULL)
                        (void) snprintf(desc, sizeof (desc),
                            dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                            thename);
                else
                        (void) snprintf(desc, sizeof (desc),
                            dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
                            origname, thename);

                switch (errno) {
                case EEXIST:
                        /*
                         * A pool with that name already exists.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "%s: pool exists"),
                            desc);
                        break;

                case EPERM:
                        /*
                         * The user doesn't have permission to create pools.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "%s: permission "
                            "denied"), desc);
                        break;

                case ENXIO:
                case EDOM:
                        /*
                         * Device is unavailable, or vdev sum didn't match.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN, "%s: one or more "
                            "devices is unavailable"),
                            desc);
                        break;

                case ENOTSUP:
                        /*
                         * Unsupported version.
                         */
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "%s: unsupported version"), desc);
                        break;

                default:
                        zfs_baderror(errno);
                }

                ret = -1;
        } else {
                zpool_handle_t *zhp;
                /*
                 * This should never fail, but play it safe anyway.
                 */
                if ((zhp = zpool_open_silent(thename)) != NULL) {
                        ret = zpool_create_zvol_links(zhp);
                        zpool_close(zhp);
                }
        }

        free(packed);
        return (ret);
}
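
/*
 * Illustrative import call (hypothetical; 'config' is assumed to come from
 * zpool_find_import(), as required above).  This imports the pool under a
 * new name with an alternate root:
 *
 *        if (zpool_import(config, "newtank", "/mnt") != 0)
 *                (void) fprintf(stderr, "import failed\n");
 */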
/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
        zfs_cmd_t zc = { 0 };
        char msg[1024];

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        zc.zc_cookie = type;

        if (zfs_ioctl(ZFS_IOC_POOL_SCRUB, &zc) == 0)
                return (0);

        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

        switch (errno) {
        case EPERM:
                /*
                 * No permission to scrub this pool.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
                break;

        case EBUSY:
                /*
                 * Resilver in progress.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: currently resilvering"),
                    msg);
                break;

        default:
                zfs_baderror(errno);
        }
        return (-1);
}

static uint64_t
vdev_to_guid(nvlist_t *nv, const char *search, uint64_t guid)
{
        uint_t c, children;
        nvlist_t **child;
        uint64_t ret, present;
        char *path;
        uint64_t wholedisk = 0;

        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &ret) == 0);

        if (search == NULL &&
            nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
            &present) == 0) {
                /*
                 * If the device has never been present since import, the only
                 * reliable way to match the vdev is by GUID.
                 */
                if (ret == guid)
                        return (ret);
        } else if (search != NULL &&
            nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
                    &wholedisk);
                if (wholedisk) {
                        /*
                         * For whole disks, the internal path has 's0', but the
                         * path passed in by the user doesn't.
                         */
                        if (strlen(search) == strlen(path) - 2 &&
                            strncmp(search, path, strlen(search)) == 0)
                                return (ret);
                } else if (strcmp(search, path) == 0) {
                        return (ret);
                }
        }

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0)
                return (0);

        for (c = 0; c < children; c++)
                if ((ret = vdev_to_guid(child[c], search, guid)) != 0)
                        return (ret);

        return (0);
}

/*
 * Given a string describing a vdev, returns the matching GUID, or 0 if none.
 */
uint64_t
zpool_vdev_to_guid(zpool_handle_t *zhp, const char *path)
{
        char buf[MAXPATHLEN];
        const char *search;
        char *end;
        nvlist_t *nvroot;
        uint64_t guid;

        guid = strtoull(path, &end, 10);
        if (guid != 0 && *end == '\0') {
                search = NULL;
        } else if (path[0] != '/') {
                (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
                search = buf;
        } else {
                search = path;
        }

        verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);

        return (vdev_to_guid(nvroot, search, guid));
}
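
/*
 * Illustrative lookups (hypothetical device names): zpool_vdev_to_guid()
 * accepts a short /dev/dsk name, a full path, or a numeric GUID string, and
 * returns 0 when no vdev matches:
 *
 *        uint64_t guid = zpool_vdev_to_guid(zhp, "c0t0d0");
 *
 *        if (guid == 0)
 *                (void) fprintf(stderr, "no such device in pool\n");
 */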
/*
 * Bring the specified vdev online.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path)
{
        zfs_cmd_t zc = { 0 };
        char msg[1024];

        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot online %s"), path);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        if ((zc.zc_guid = zpool_vdev_to_guid(zhp, path)) == 0) {
                zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
                    msg);
                return (-1);
        }

        if (zfs_ioctl(ZFS_IOC_VDEV_ONLINE, &zc) == 0)
                return (0);

        switch (errno) {
        case ENODEV:
                /*
                 * Device doesn't exist.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
                break;

        case EPERM:
                /*
                 * No permission to bring this vdev online.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
                break;

        default:
                zfs_baderror(errno);
        }
        return (-1);
}

/*
 * Take the specified vdev offline.
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
{
        zfs_cmd_t zc = { 0 };
        char msg[1024];

        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        if ((zc.zc_guid = zpool_vdev_to_guid(zhp, path)) == 0) {
                zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
                    msg);
                return (-1);
        }

        zc.zc_cookie = istmp;

        if (zfs_ioctl(ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
                return (0);

        switch (errno) {
        case ENODEV:
                /*
                 * Device doesn't exist.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
                break;

        case EPERM:
                /*
                 * No permission to take this vdev offline.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
                break;

        case EBUSY:
                /*
                 * There are no other replicas of this device.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: no valid replicas"), msg);
                break;

        default:
                zfs_baderror(errno);
        }
        return (-1);
}
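
/*
 * Illustrative maintenance sequence (hypothetical; "c0t0d0" is an assumed
 * device): take a device offline temporarily, then bring it back online.
 *
 *        if (zpool_vdev_offline(zhp, "c0t0d0", TRUE) == 0) {
 *                ... service the disk ...
 *                (void) zpool_vdev_online(zhp, "c0t0d0");
 *        }
 */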
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
        zfs_cmd_t zc = { 0 };
        char msg[1024];
        char *packed;
        int ret;
        size_t len;

        if (replacing)
                (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
                    "cannot replace %s with %s"), old_disk, new_disk);
        else
                (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
                    "cannot attach %s to %s"), new_disk, old_disk);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        if ((zc.zc_guid = zpool_vdev_to_guid(zhp, old_disk)) == 0) {
                zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
                    msg);
                return (-1);
        }
        zc.zc_cookie = replacing;

        verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

        packed = zfs_malloc(len);

        verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

        zc.zc_config_src = (uint64_t)(uintptr_t)packed;
        zc.zc_config_src_size = len;

        ret = zfs_ioctl(ZFS_IOC_VDEV_ATTACH, &zc);

        free(packed);

        if (ret == 0)
                return (0);

        switch (errno) {
        case EPERM:
                /*
                 * No permission to mess with the config.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
                break;

        case ENODEV:
                /*
                 * Device doesn't exist.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: %s not in pool"),
                    msg, old_disk);
                break;

        case ENOTSUP:
                /*
                 * Can't attach to or replace this type of vdev.
                 */
                if (replacing)
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "%s: cannot replace a replacing device"), msg);
                else
                        zfs_error(dgettext(TEXT_DOMAIN,
                            "%s: attach is only applicable to mirrors"), msg);
                break;

        case EINVAL:
                /*
                 * The new device must be a single disk.
                 */
                zfs_error(dgettext(TEXT_DOMAIN,
                    "%s: <new_device> must be a single disk"), msg);
                break;

        case ENXIO:
                /*
                 * This is unlikely to happen since we've verified that
                 * all the devices can be opened from userland, but it's
                 * still possible in some circumstances.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: %s is unavailable"),
                    msg, new_disk);
                break;

        case EBUSY:
                /*
                 * The new device is in use.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: %s busy"), msg, new_disk);
                break;

        case EOVERFLOW:
                /*
                 * The new device is too small.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: %s is too small"),
                    msg, new_disk);
                break;

        case EDOM:
                /*
                 * The new device has a different alignment requirement.
                 */
                zfs_error(dgettext(TEXT_DOMAIN,
                    "%s: devices have different sector alignment"), msg);
                break;

        case ENAMETOOLONG:
                /*
                 * The resulting top-level vdev spec won't fit in the label.
                 */
                zfs_error(dgettext(TEXT_DOMAIN,
                    "%s: too many devices in a single vdev"), msg);
                break;

        default:
                zfs_baderror(errno);
        }

        return (1);
}

/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
        zfs_cmd_t zc = { 0 };
        char msg[1024];

        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        if ((zc.zc_guid = zpool_vdev_to_guid(zhp, path)) == 0) {
                zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
                    msg);
                return (-1);
        }

        if (zfs_ioctl(ZFS_IOC_VDEV_DETACH, &zc) == 0)
                return (0);

        switch (errno) {
        case EPERM:
                /*
                 * No permission to mess with the config.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
                break;

        case ENODEV:
                /*
                 * Device doesn't exist.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
                break;

        case ENOTSUP:
                /*
                 * Can't detach from this type of vdev.
                 */
                zfs_error(dgettext(TEXT_DOMAIN,
                    "%s: only applicable to mirror and replacing vdevs"), msg);
                break;

        case EBUSY:
                /*
                 * There are no other replicas of this device.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: no valid replicas"), msg);
                break;

        default:
                zfs_baderror(errno);
        }

        return (1);
}
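
/*
 * Illustrative replace flow (hypothetical; 'newroot' is an nvlist describing
 * the replacement disk, built by the caller): attach with 'replacing' set,
 * then let the resilver complete before the replacing vdev detaches itself.
 *
 *        if (zpool_vdev_attach(zhp, "c0t0d0", "c0t1d0", newroot, TRUE) != 0)
 *                (void) fprintf(stderr, "replace failed\n");
 */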
/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path)
{
        zfs_cmd_t zc = { 0 };
        char msg[1024];

        if (path)
                (void) snprintf(msg, sizeof (msg),
                    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
                    path);
        else
                (void) snprintf(msg, sizeof (msg),
                    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
                    zhp->zpool_name);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        if (path && (zc.zc_guid = zpool_vdev_to_guid(zhp, path)) == 0) {
                zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
                    msg);
                return (-1);
        }

        if (zfs_ioctl(ZFS_IOC_CLEAR, &zc) == 0)
                return (0);

        switch (errno) {
        case EPERM:
                /*
                 * No permission to mess with the config.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
                break;

        case ENODEV:
                /*
                 * Device doesn't exist.
                 */
                zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
                break;

        default:
                zfs_baderror(errno);
        }

        return (1);
}

static int
do_zvol(zfs_handle_t *zhp, void *data)
{
        int linktype = (int)(uintptr_t)data;
        int ret = 0;

        /*
         * We check for volblocksize instead of ZFS_TYPE_VOLUME so that we
         * correctly handle snapshots of volumes.
         */
        if (zhp->zfs_volblocksize != 0) {
                if (linktype)
                        ret = zvol_create_link(zhp->zfs_name);
                else
                        ret = zvol_remove_link(zhp->zfs_name);
        }

        if (ret == 0)
                ret = zfs_iter_children(zhp, do_zvol, data);

        zfs_close(zhp);
        return (ret);
}

/*
 * Iterate over all zvols in the pool and make any necessary minor nodes.
 */
int
zpool_create_zvol_links(zpool_handle_t *zhp)
{
        zfs_handle_t *zfp;
        int ret;

        /*
         * If the pool is unavailable, just return success.
         */
        if ((zfp = make_dataset_handle(zhp->zpool_name)) == NULL)
                return (0);

        ret = zfs_iter_children(zfp, do_zvol, (void *)TRUE);

        zfs_close(zfp);
        return (ret);
}

/*
 * Iterate over all zvols in the pool and remove any minor nodes.
 */
int
zpool_remove_zvol_links(zpool_handle_t *zhp)
{
        zfs_handle_t *zfp;
        int ret;

        /*
         * If the pool is unavailable, just return success.
         */
        if ((zfp = make_dataset_handle(zhp->zpool_name)) == NULL)
                return (0);

        ret = zfs_iter_children(zfp, do_zvol, (void *)FALSE);

        zfs_close(zfp);
        return (ret);
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
        ddi_devid_t devid;
        char *minor;
        char *path;
        devid_nmlist_t *list = NULL;
        int ret;

        if (devid_str_decode(devid_str, &devid, &minor) != 0)
                return (NULL);

        ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

        devid_str_free(minor);
        devid_free(devid);

        if (ret != 0)
                return (NULL);

        path = zfs_strdup(list[0].devname);
        devid_free_nmlist(list);

        return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
        int fd;
        ddi_devid_t devid;
        char *minor, *ret;

        if ((fd = open(path, O_RDONLY)) < 0)
                return (NULL);

        minor = NULL;
        ret = NULL;
        if (devid_get(fd, &devid) == 0) {
                if (devid_get_minor_name(fd, &minor) == 0)
                        ret = devid_str_encode(devid, minor);
                if (minor != NULL)
                        devid_str_free(minor);
                devid_free(devid);
        }
        (void) close(fd);

        return (ret);
}
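
/*
 * Illustrative round trip through the two helpers above (hypothetical device
 * path; both calls can return NULL, and each result must be freed with the
 * matching routine):
 *
 *        char *devid, *path;
 *
 *        if ((devid = path_to_devid("/dev/dsk/c0t0d0s0")) != NULL) {
 *                if ((path = devid_to_path(devid)) != NULL)
 *                        free(path);
 *                devid_str_free(devid);
 *        }
 */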
/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.
 * We ignore any failure here, since a common case is for an unprivileged user
 * to type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
        zfs_cmd_t zc = { 0 };

        (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        (void) strncpy(zc.zc_prop_value, path, sizeof (zc.zc_prop_value));
        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
            &zc.zc_guid) == 0);

        (void) zfs_ioctl(ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).
 * If the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the
 * vdev.  If 'zhp' is NULL, then this is an exported pool, and we don't need to
 * do any of these checks.
 */
char *
zpool_vdev_name(zpool_handle_t *zhp, nvlist_t *nv)
{
        char *path, *devid;
        uint64_t value;
        char buf[64];

        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
            &value) == 0) {
                verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
                    &value) == 0);
                (void) snprintf(buf, sizeof (buf), "%llu",
                    (u_longlong_t)value);
                path = buf;
        } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

                if (zhp != NULL &&
                    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID,
                    &devid) == 0) {
                        /*
                         * Determine if the current path is correct.
                         */
                        char *newdevid = path_to_devid(path);

                        if (newdevid == NULL ||
                            strcmp(devid, newdevid) != 0) {
                                char *newpath;

                                if ((newpath = devid_to_path(devid)) != NULL) {
                                        /*
                                         * Update the path appropriately.
                                         */
                                        set_path(zhp, nv, newpath);
                                        verify(nvlist_add_string(nv,
                                            ZPOOL_CONFIG_PATH, newpath) == 0);
                                        free(newpath);
                                        verify(nvlist_lookup_string(nv,
                                            ZPOOL_CONFIG_PATH, &path) == 0);
                                }

                                if (newdevid)
                                        devid_str_free(newdevid);
                        }

                }

                if (strncmp(path, "/dev/dsk/", 9) == 0)
                        path += 9;

                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
                    &value) == 0 && value) {
                        char *tmp = zfs_strdup(path);
                        tmp[strlen(path) - 2] = '\0';
                        return (tmp);
                }
        } else {
                verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE,
                    &path) == 0);
        }

        return (zfs_strdup(path));
}

static int
zbookmark_compare(const void *a, const void *b)
{
        return (memcmp(a, b, sizeof (zbookmark_t)));
}
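
/*
 * Illustrative use of zpool_vdev_name() (hypothetical; 'nv' is one vdev
 * nvlist from the ZPOOL_CONFIG_VDEV_TREE): the returned string is always
 * allocated, so the caller must free it.
 *
 *        char *name = zpool_vdev_name(zhp, nv);
 *
 *        (void) printf("%s\n", name);
 *        free(name);
 */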
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t ***list, size_t *nelem)
{
        zfs_cmd_t zc = { 0 };
        uint64_t count;
        zbookmark_t *zb;
        int i, j;

        if (zhp->zpool_error_log != NULL) {
                *list = zhp->zpool_error_log;
                *nelem = zhp->zpool_error_count;
                return (0);
        }

        /*
         * Retrieve the raw error list from the kernel.  If the number of
         * errors has increased, allocate more space and continue until we get
         * the entire list.
         */
        verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
            &count) == 0);
        zc.zc_config_dst = (uintptr_t)zfs_malloc(count * sizeof (zbookmark_t));
        zc.zc_config_dst_size = count;
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        for (;;) {
                if (zfs_ioctl(ZFS_IOC_ERROR_LOG, &zc) != 0) {
                        if (errno == ENOMEM) {
                                free((void *)(uintptr_t)zc.zc_config_dst);
                                zc.zc_config_dst = (uintptr_t)zfs_malloc(
                                    zc.zc_config_dst_size *
                                    sizeof (zbookmark_t));
                        } else {
                                free((void *)(uintptr_t)zc.zc_config_dst);
                                return (-1);
                        }
                } else {
                        break;
                }
        }

        /*
         * Sort the resulting bookmarks.  This is a little confusing due to the
         * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
         * to first, and 'zc_config_dst_size' indicates the number of bookmarks
         * _not_ copied as part of the process.  So we point the start of our
         * array appropriately and decrement the total number of elements.
         */
        zb = ((zbookmark_t *)(uintptr_t)zc.zc_config_dst) +
            zc.zc_config_dst_size;
        count -= zc.zc_config_dst_size;

        qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

        /*
         * Count the number of unique elements.
         */
        j = 0;
        for (i = 0; i < count; i++) {
                if (i > 0 && memcmp(&zb[i - 1], &zb[i],
                    sizeof (zbookmark_t)) == 0)
                        continue;
                j++;
        }

        /*
         * If the user has only requested the number of items, return it now
         * without bothering with the extra work.
         */
        if (list == NULL) {
                *nelem = j;
                free((void *)(uintptr_t)zc.zc_config_dst);
                return (0);
        }

        zhp->zpool_error_count = j;

        /*
         * Allocate an array of nvlists to hold the results.
         */
        zhp->zpool_error_log = zfs_malloc(j * sizeof (nvlist_t *));

        /*
         * Fill in the results with names from the kernel.
         */
        j = 0;
        for (i = 0; i < count; i++) {
                char buf[64];
                nvlist_t *nv;

                if (i > 0 && memcmp(&zb[i - 1], &zb[i],
                    sizeof (zbookmark_t)) == 0)
                        continue;

                verify(nvlist_alloc(&nv, NV_UNIQUE_NAME,
                    0) == 0);
                zhp->zpool_error_log[j] = nv;

                zc.zc_bookmark = zb[i];
                if (zfs_ioctl(ZFS_IOC_BOOKMARK_NAME, &zc) == 0) {
                        verify(nvlist_add_string(nv, ZPOOL_ERR_DATASET,
                            zc.zc_prop_name) == 0);
                        verify(nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
                            zc.zc_prop_value) == 0);
                        verify(nvlist_add_string(nv, ZPOOL_ERR_RANGE,
                            zc.zc_filename) == 0);
                } else {
                        (void) snprintf(buf, sizeof (buf), "%llx",
                            (u_longlong_t)zb[i].zb_objset);
                        verify(nvlist_add_string(nv,
                            ZPOOL_ERR_DATASET, buf) == 0);
                        (void) snprintf(buf, sizeof (buf), "%llx",
                            (u_longlong_t)zb[i].zb_object);
                        verify(nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
                            buf) == 0);
                        (void) snprintf(buf, sizeof (buf), "lvl=%u blkid=%llu",
                            (int)zb[i].zb_level, (long long)zb[i].zb_blkid);
                        verify(nvlist_add_string(nv, ZPOOL_ERR_RANGE,
                            buf) == 0);
                }

                j++;
        }

        *list = zhp->zpool_error_log;
        *nelem = zhp->zpool_error_count;

        free((void *)(uintptr_t)zc.zc_config_dst);

        return (0);
}
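
/*
 * Illustrative error-log retrieval (hypothetical caller): pass NULL for
 * 'list' to get only the count, or both pointers to get the uniquified
 * entries, which remain owned by the handle and are freed in zpool_close().
 *
 *        nvlist_t **log;
 *        size_t nelem;
 *
 *        if (zpool_get_errlog(zhp, &log, &nelem) == 0) {
 *                size_t n;
 *                for (n = 0; n < nelem; n++)
 *                        ... examine log[n] ...
 *        }
 */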