/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"

/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
static int
zpool_name_valid(const char *pool, char *buf, size_t buflen)
{
	namecheck_err_t why;
	char what;

	if (pool_namecheck(pool, &why, &what) != 0) {
		if (buf != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				(void) snprintf(buf, buflen,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				(void) snprintf(buf, buflen,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"), buflen);
				break;

			case NAME_ERR_RESERVED:
				(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
				    "name is reserved\n"
				    "pool name may have been omitted"), buflen);
				break;

			case NAME_ERR_DISKLIKE:
				(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
				    "pool name is reserved\n"
				    "pool name may have been omitted"), buflen);
				break;
			}
		}
		return (FALSE);
	}

	return (TRUE);
}

/*
 * Set the pool-wide health based on the vdev state of the root vdev.
 */
void
set_pool_health(nvlist_t *config)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	char *health;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	switch (vs->vs_state) {

	case VDEV_STATE_CLOSED:
	case VDEV_STATE_CANT_OPEN:
	case VDEV_STATE_OFFLINE:
		health = dgettext(TEXT_DOMAIN, "FAULTED");
		break;

	case VDEV_STATE_DEGRADED:
		health = dgettext(TEXT_DOMAIN, "DEGRADED");
		break;

	case VDEV_STATE_HEALTHY:
		health = dgettext(TEXT_DOMAIN, "ONLINE");
		break;

	default:
		zfs_baderror(vs->vs_state);
	}

	verify(nvlist_add_string(config, ZPOOL_CONFIG_POOL_HEALTH,
	    health) == 0);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(const char *pool)
{
	zpool_handle_t *zhp;
	int error;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(pool, NULL, 0)) {
		zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': invalid "
		    "pool name"), pool);
		return (NULL);
	}

	zhp = zfs_malloc(sizeof (zpool_handle_t));

	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if ((error = zpool_refresh_stats(zhp)) != 0) {
		if (error == ENOENT || error == EINVAL) {
			zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': no "
			    "such pool"), pool);
			free(zhp);
			return (NULL);
		} else {
			zhp->zpool_state = POOL_STATE_UNAVAIL;
		}
	} else {
		zhp->zpool_state = POOL_STATE_ACTIVE;
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
zpool_handle_t *
zpool_open_silent(const char *pool)
{
	zpool_handle_t *zhp;
	int error;

	zhp = zfs_malloc(sizeof (zpool_handle_t));

	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if ((error = zpool_refresh_stats(zhp)) != 0) {
		if (error == ENOENT || error == EINVAL) {
			free(zhp);
			return (NULL);
		} else {
			zhp->zpool_state = POOL_STATE_UNAVAIL;
		}
	} else {
		zhp->zpool_state = POOL_STATE_ACTIVE;
	}

	return (zhp);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': pool is "
		    "currently unavailable"), zhp->zpool_name);
		zfs_error(dgettext(TEXT_DOMAIN, "run 'zpool status %s' for "
		    "detailed information"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_error_log) {
		int i;
		for (i = 0; i < zhp->zpool_error_count; i++)
			nvlist_free(zhp->zpool_error_log[i]);
		free(zhp->zpool_error_log);
	}
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the GUID of the pool.
 */
uint64_t
zpool_get_guid(zpool_handle_t *zhp)
{
	uint64_t guid;

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &guid) == 0);
	return (guid);
}

/*
 * Return the amount of space currently consumed by the pool.
 */
uint64_t
zpool_get_space_used(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_alloc);
}

/*
 * Return the total space in the pool.
 */
uint64_t
zpool_get_space_total(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_space);
}

/*
 * Return the alternate root for this pool, if any.
 */
int
zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
{
	zfs_cmd_t zc = { 0 };

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
	    zc.zc_root[0] == '\0')
		return (-1);

	(void) strlcpy(buf, zc.zc_root, buflen);

	return (0);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(const char *pool, nvlist_t *nvroot, const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char *packed;
	size_t len;
	int err;
	char reason[64];

	if (!zpool_name_valid(pool, reason, sizeof (reason))) {
		zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': %s"),
		    pool, reason);
		return (-1);
	}

	if (altroot != NULL && altroot[0] != '/') {
		zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': alternate "
		    "root '%s' must be a complete path"), pool, altroot);
		return (-1);
	}

	if ((err = nvlist_size(nvroot, &len, NV_ENCODE_NATIVE)) != 0)
		zfs_baderror(err);

	packed = zfs_malloc(len);

	if ((err = nvlist_pack(nvroot, &packed, &len,
	    NV_ENCODE_NATIVE, 0)) != 0)
		zfs_baderror(err);

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	if (altroot != NULL)
		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));

	if (zfs_ioctl(ZFS_IOC_POOL_CREATE, &zc) != 0) {
		switch (errno) {
		case EEXIST:
			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
			    "pool exists"), pool);
			break;

		case EPERM:
			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
			    "permission denied"), pool);
			break;

		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
			    "one or more vdevs refer to the same device"),
			    pool);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error(dgettext(TEXT_DOMAIN, "cannot "
				    "create '%s': one or more devices is less "
				    "than the minimum size (%s)"), pool,
				    buf);
			}
			break;

		case ENAMETOOLONG:
			/*
			 * One of the vdevs has exceeded VDEV_SPEC_MAX length in
			 * its plaintext representation.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
			    "too many devices in a single vdev"), pool);
			break;

		case EIO:
			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
			    "I/O error on one or more devices"), pool);
			break;

		case ENXIO:
			/*
			 * This is unlikely to happen since we've verified that
			 * all the devices can be opened from userland, but it's
			 * still possible in some circumstances.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
			    "one or more devices is unavailable"), pool);
			break;

		case ENOSPC:
			/*
			 * This can occur if we were incapable of writing to a
			 * file vdev because the underlying filesystem is out of
			 * space.  This is very similar to EOVERFLOW, but we'll
			 * produce a slightly different message.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
			    "one or more devices is out of space"), pool);
			break;

		default:
			zfs_baderror(errno);
		}

		/* don't leak the packed config on the error path */
		free(packed);
		return (-1);
	}

	free(packed);

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (altroot != NULL) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(pool, ZFS_TYPE_ANY)) != NULL);
		verify(zfs_prop_set(zhp, ZFS_PROP_MOUNTPOINT, "/") == 0);

		zfs_close(zhp);
	}

	return (0);
}

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot destroy '%s': permission denied"),
			    zhp->zpool_name);
			break;

		case EBUSY:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot destroy '%s': pool busy"),
			    zhp->zpool_name);
			break;

		case ENOENT:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot destroy '%s': no such pool"),
			    zhp->zpool_name);
			break;

		case EROFS:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot destroy '%s': one or more devices is "
			    "read only, or '/' is mounted read only"),
			    zhp->zpool_name);
			break;

		default:
			zfs_baderror(errno);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	char *packed;
	size_t len;
	zfs_cmd_t zc = { 0 };

	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

	packed = zfs_malloc(len);

	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	if (zfs_ioctl(ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EPERM:
			zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
			    "permission denied"), zhp->zpool_name);
			break;

		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
			    "one or more vdevs refer to the same device"),
			    zhp->zpool_name);
			break;

		case ENAMETOOLONG:
			/*
			 * One of the vdevs has exceeded VDEV_SPEC_MAX length in
			 * its plaintext representation.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
			    "too many devices in a single vdev"),
			    zhp->zpool_name);
			break;

		case ENXIO:
			/*
			 * This is unlikely to happen since we've verified that
			 * all the devices can be opened from userland, but it's
			 * still possible in some circumstances.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
			    "one or more devices is unavailable"),
			    zhp->zpool_name);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error(dgettext(TEXT_DOMAIN, "cannot "
				    "add to '%s': one or more devices is less "
				    "than the minimum size (%s)"),
				    zhp->zpool_name, buf);
			}
			break;

		default:
			zfs_baderror(errno);
		}

		/* don't leak the packed config on the error path */
		free(packed);
		return (-1);
	}

	free(packed);

	return (0);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EPERM:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot export '%s': permission denied"),
			    zhp->zpool_name);
			break;

		case EBUSY:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot export '%s': pool is in use"),
			    zhp->zpool_name);
			break;

		case ENOENT:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot export '%s': no such pool"),
			    zhp->zpool_name);
			break;

		default:
			zfs_baderror(errno);
		}

		return (-1);
	}

	return (0);
}

/*
 * Import the given pool using the known configuration.  The configuration
 * should have come from zpool_find_import().  The 'newname' and 'altroot'
 * parameters control whether the pool is imported with a different name or
 * with an alternate root, respectively.
 */
int
zpool_import(nvlist_t *config, const char *newname, const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char *packed;
	size_t len;
	char *thename;
	char *origname;
	int ret;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	if (newname != NULL) {
		if (!zpool_name_valid(newname, NULL, 0)) {
			zfs_error(dgettext(TEXT_DOMAIN, "cannot import '%s': "
			    "invalid pool name"), newname);
			return (-1);
		}
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (altroot != NULL && altroot[0] != '/') {
		zfs_error(dgettext(TEXT_DOMAIN, "cannot import '%s': alternate "
		    "root '%s' must be a complete path"), thename,
		    altroot);
		return (-1);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	if (altroot != NULL)
		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
	else
		zc.zc_root[0] = '\0';

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	verify(nvlist_size(config, &len, NV_ENCODE_NATIVE) == 0);

	packed = zfs_malloc(len);

	verify(nvlist_pack(config, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	ret = 0;
	if (zfs_ioctl(ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];
		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case EEXIST:
			/*
			 * A pool with that name already exists.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "%s: pool exists"),
			    desc);
			break;

		case EPERM:
			/*
			 * The user doesn't have permission to create pools.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "%s: permission "
			    "denied"), desc);
			break;

		case ENXIO:
		case EDOM:
			/*
			 * Device is unavailable, or vdev sum didn't match.
			 */
			zfs_error(dgettext(TEXT_DOMAIN, "%s: one or more "
			    "devices is unavailable"),
			    desc);
			break;

		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			zfs_error(dgettext(TEXT_DOMAIN,
			    "%s: unsupported version"), desc);
			break;

		default:
			zfs_baderror(errno);
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;
		/*
		 * This should never fail, but play it safe anyway.
		 */
		if ((zhp = zpool_open_silent(thename)) != NULL) {
			ret = zpool_create_zvol_links(zhp);
			zpool_close(zhp);
		}
	}

	free(packed);
	return (ret);
}

/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (zfs_ioctl(ZFS_IOC_POOL_SCRUB, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

	switch (errno) {
	case EPERM:
		/*
		 * No permission to scrub this pool.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
		break;

	case EBUSY:
		/*
		 * Resilver in progress.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: currently resilvering"),
		    msg);
		break;

	default:
		zfs_baderror(errno);
	}
	return (-1);
}

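/*
 * Walk the given vdev tree looking for a vdev that matches either the
 * supplied path ('search') or, if 'search' is NULL, the supplied GUID.
 * Returns the matching vdev's GUID, or 0 if no match is found.
 */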
static uint64_t
vdev_to_guid(nvlist_t *nv, const char *search, uint64_t guid)
{
	uint_t c, children;
	nvlist_t **child;
	uint64_t ret, present;
	char *path;
	uint64_t wholedisk = 0;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &ret) == 0);

	if (search == NULL &&
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
		/*
		 * If the device has never been present since import, the only
		 * reliable way to match the vdev is by GUID.
		 */
		if (ret == guid)
			return (ret);
	} else if (search != NULL &&
	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (wholedisk) {
			/*
			 * For whole disks, the internal path has 's0', but the
			 * path passed in by the user doesn't.
			 */
			if (strlen(search) == strlen(path) - 2 &&
			    strncmp(search, path, strlen(search)) == 0)
				return (ret);
		} else if (strcmp(search, path) == 0) {
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (0);

	for (c = 0; c < children; c++)
		if ((ret = vdev_to_guid(child[c], search, guid)) != 0)
			return (ret);

	return (0);
}

/*
 * Given a string describing a vdev, returns the matching GUID, or 0 if none.
 */
uint64_t
zpool_vdev_to_guid(zpool_handle_t *zhp, const char *path)
{
	char buf[MAXPATHLEN];
	const char *search;
	char *end;
	nvlist_t *nvroot;
	uint64_t guid;

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		search = NULL;
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		search = buf;
	} else {
		search = path;
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	return (vdev_to_guid(nvroot, search, guid));
}

/*
 * Bring the specified vdev online
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((zc.zc_guid = zpool_vdev_to_guid(zhp, path)) == 0) {
		zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
		    msg);
		return (-1);
	}

	if (zfs_ioctl(ZFS_IOC_VDEV_ONLINE, &zc) == 0)
		return (0);

	switch (errno) {
	case ENODEV:
		/*
		 * Device doesn't exist
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
		break;

	case EPERM:
		/*
		 * No permission to bring this vdev online.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
		break;

	default:
		zfs_baderror(errno);
	}
	return (-1);
}

/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((zc.zc_guid = zpool_vdev_to_guid(zhp, path)) == 0) {
		zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
		    msg);
		return (-1);
	}

	zc.zc_cookie = istmp;

	if (zfs_ioctl(ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
		return (0);

	switch (errno) {
	case ENODEV:
		/*
		 * Device doesn't exist
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
		break;

	case EPERM:
		/*
		 * No permission to take this vdev offline.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: no valid replicas"), msg);
		break;

	default:
		zfs_baderror(errno);
	}
	return (-1);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	char *packed;
	int ret;
	size_t len;

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((zc.zc_guid = zpool_vdev_to_guid(zhp, old_disk)) == 0) {
		zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
		    msg);
		return (-1);
	}
	zc.zc_cookie = replacing;

	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

	packed = zfs_malloc(len);

	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	ret = zfs_ioctl(ZFS_IOC_VDEV_ATTACH, &zc);

	free(packed);

	if (ret == 0)
		return (0);

	switch (errno) {
	case EPERM:
		/*
		 * No permission to mess with the config.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
		break;

	case ENODEV:
		/*
		 * Device doesn't exist.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: %s not in pool"),
		    msg, old_disk);
		break;

	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing)
			zfs_error(dgettext(TEXT_DOMAIN,
			    "%s: cannot replace a replacing device"), msg);
		else
			zfs_error(dgettext(TEXT_DOMAIN,
			    "%s: attach is only applicable to mirrors"), msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error(dgettext(TEXT_DOMAIN,
		    "%s: <new_device> must be a single disk"), msg);
		break;

	case ENXIO:
		/*
		 * This is unlikely to happen since we've verified that
		 * all the devices can be opened from userland, but it's
		 * still possible in some circumstances.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: %s is unavailable"),
		    msg, new_disk);
		break;

	case EBUSY:
		/*
		 * The new device is in use.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: %s busy"), msg, new_disk);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: %s is too small"),
		    msg, new_disk);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error(dgettext(TEXT_DOMAIN,
		    "%s: devices have different sector alignment"), msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		zfs_error(dgettext(TEXT_DOMAIN,
		    "%s: too many devices in a single vdev"), msg);
		break;

	default:
		zfs_baderror(errno);
	}

	return (1);
}

/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((zc.zc_guid = zpool_vdev_to_guid(zhp, path)) == 0) {
		zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
		    msg);
		return (-1);
	}

	if (zfs_ioctl(ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {
	case EPERM:
		/*
		 * No permission to mess with the config.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
		break;

	case ENODEV:
		/*
		 * Device doesn't exist.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
		break;

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error(dgettext(TEXT_DOMAIN,
		    "%s: only applicable to mirror and replacing vdevs"), msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: no valid replicas"), msg);
		break;

	default:
		zfs_baderror(errno);
	}

	return (1);
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path && (zc.zc_guid = zpool_vdev_to_guid(zhp, path)) == 0) {
		zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
		    msg);
		return (-1);
	}

	if (zfs_ioctl(ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	switch (errno) {
	case EPERM:
		/*
		 * No permission to mess with the config.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
		break;

	case ENODEV:
		/*
		 * Device doesn't exist.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
		break;

	default:
		zfs_baderror(errno);
	}

	return (1);
}

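/*
 * Callback for zfs_iter_children().  Creates or removes the /dev/zvol minor
 * node link for the given dataset, then recurses into its children.
 */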
static int
do_zvol(zfs_handle_t *zhp, void *data)
{
	int linktype = (int)(uintptr_t)data;
	int ret;

	/*
	 * We check for volblocksize instead of ZFS_TYPE_VOLUME so that we
	 * correctly handle snapshots of volumes.
	 */
	if (zhp->zfs_volblocksize != 0) {
		if (linktype)
			(void) zvol_create_link(zhp->zfs_name);
		else
			(void) zvol_remove_link(zhp->zfs_name);
	}

	ret = zfs_iter_children(zhp, do_zvol, data);

	zfs_close(zhp);
	return (ret);
}

/*
 * Iterate over all zvols in the pool and make any necessary minor nodes.
 */
int
zpool_create_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_children(zfp, do_zvol, (void *)TRUE);

	zfs_close(zfp);
	return (ret);
}

/*
 * Iterate over all zvols in the pool and remove any minor nodes.
 */
int
zpool_remove_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_children(zfp, do_zvol, (void *)FALSE);

	zfs_close(zfp);
	return (ret);
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	path = zfs_strdup(list[0].devname);
	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.
 * We ignore any failure here, since a common case is for an unprivileged user
 * to type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_prop_value, path, sizeof (zc.zc_prop_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) zfs_ioctl(ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this,
 * we first do a path -> devid translation (which is fast for the common
 * case).  If the devid matches, we're done.  If not, we do a reverse devid ->
 * path translation and issue the appropriate ioctl() to update the path of
 * the vdev.  If 'zhp' is NULL, then this is an exported pool, and we don't
 * need to do any of these checks.
 */
char *
zpool_vdev_name(zpool_handle_t *zhp, nvlist_t *nv)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu", value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		if (zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					verify(nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0);
					free(newpath);
					verify(nvlist_lookup_string(nv,
					    ZPOOL_CONFIG_PATH, &path) == 0);
				}

				if (newdevid)
					devid_str_free(newdevid);
			}

		}

		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			char *tmp = zfs_strdup(path);
			tmp[strlen(path) - 2] = '\0';
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
	}

	return (zfs_strdup(path));
}

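/*
 * qsort() comparator for zbookmark_t entries; a simple byte-wise comparison
 * is sufficient to group identical bookmarks together.
 */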
static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t ***list, size_t *nelem)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb;
	int i, j;

	if (zhp->zpool_error_log != NULL) {
		*list = zhp->zpool_error_log;
		*nelem = zhp->zpool_error_count;
		return (0);
	}

	/*
	 * Retrieve the raw error list from the kernel.  If the number of
	 * errors has increased, allocate more space and continue until we get
	 * the entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	zc.zc_config_dst = (uintptr_t)zfs_malloc(count * sizeof (zbookmark_t));
	zc.zc_config_dst_size = count;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	for (;;) {
		if (zfs_ioctl(ZFS_IOC_ERROR_LOG, &zc) != 0) {
			if (errno == ENOMEM) {
				free((void *)(uintptr_t)zc.zc_config_dst);
				zc.zc_config_dst = (uintptr_t)
				    zfs_malloc(zc.zc_config_dst_size);
			} else {
				/* don't leak the bookmark buffer on failure */
				free((void *)(uintptr_t)zc.zc_config_dst);
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_config_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_config_dst) +
	    zc.zc_config_dst_size;
	count -= zc.zc_config_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	/*
	 * Count the number of unique elements
	 */
	j = 0;
	for (i = 0; i < count; i++) {
		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
		    sizeof (zbookmark_t)) == 0)
			continue;
		j++;
	}

	/*
	 * If the user has only requested the number of items, return it now
	 * without bothering with the extra work.
	 */
	if (list == NULL) {
		*nelem = j;
		free((void *)(uintptr_t)zc.zc_config_dst);
		return (0);
	}

	zhp->zpool_error_count = j;

	/*
	 * Allocate an array of nvlists to hold the results
	 */
	zhp->zpool_error_log = zfs_malloc(j * sizeof (nvlist_t *));

	/*
	 * Fill in the results with names from the kernel.
	 */
	j = 0;
	for (i = 0; i < count; i++) {
		char buf[64];
		nvlist_t *nv;

		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
		    sizeof (zbookmark_t)) == 0)
			continue;

		verify(nvlist_alloc(&nv, NV_UNIQUE_NAME, 0) == 0);
		zhp->zpool_error_log[j] = nv;

		zc.zc_bookmark = zb[i];
		if (zfs_ioctl(ZFS_IOC_BOOKMARK_NAME, &zc) == 0) {
			verify(nvlist_add_string(nv, ZPOOL_ERR_DATASET,
			    zc.zc_prop_name) == 0);
			verify(nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
			    zc.zc_prop_value) == 0);
			verify(nvlist_add_string(nv, ZPOOL_ERR_RANGE,
			    zc.zc_filename) == 0);
		} else {
			(void) snprintf(buf, sizeof (buf), "%llx",
			    zb[i].zb_objset);
			verify(nvlist_add_string(nv,
			    ZPOOL_ERR_DATASET, buf) == 0);
			(void) snprintf(buf, sizeof (buf), "%llx",
			    zb[i].zb_object);
			verify(nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
			    buf) == 0);
			(void) snprintf(buf, sizeof (buf), "lvl=%u blkid=%llu",
			    (int)zb[i].zb_level, (long long)zb[i].zb_blkid);
			verify(nvlist_add_string(nv, ZPOOL_ERR_RANGE,
			    buf) == 0);
		}

		j++;
	}

	*list = zhp->zpool_error_log;
	*nelem = zhp->zpool_error_count;

	free((void *)(uintptr_t)zc.zc_config_dst);

	return (0);
}