/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#if defined(__i386) || defined(__amd64)
#define	BOOTCMD	"installgrub(1M)"
#else
#define	BOOTCMD	"installboot(1M)"
#endif

#define	DISK_ROOT	"/dev/dsk"
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"

/*
 * ====================================================================
 *   zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}
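
/*
 * Example (illustrative sketch, not part of libzfs): reading a numeric
 * property with zpool_get_prop_int() and checking where the value came
 * from.  The handle 'zhp' is assumed to have been obtained from
 * zpool_open() by the caller.
 */
static void
example_print_version(zpool_handle_t *zhp)
{
	zprop_source_t src;
	uint64_t version;

	/* Falls back to the property's default if the pool is faulted. */
	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
	(void) printf("pool version %llu (%s)\n", (u_longlong_t)version,
	    src == ZPROP_SRC_DEFAULT ? "default" : "stored");
}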

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				if (srctype != NULL)
					*srctype = src;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_USED:
		case ZPOOL_PROP_AVAILABLE:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

/*
 * Check that the bootfs name carries the same pool name as the pool it
 * is being set on.  The bootfs value is assumed to already be a valid
 * dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
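
/*
 * Illustrative check (not part of libzfs): for a pool named "tank",
 * bootfs_name_valid() accepts "tank" and "tank/boot" but rejects a
 * dataset from another pool such as "tank2/boot", since the bootfs
 * dataset must live in the pool being configured.
 */
static void
example_bootfs_check(void)
{
	char bootfs[] = "tank/boot";

	if (bootfs_name_valid("tank", bootfs))
		(void) printf("%s is an acceptable bootfs value\n", bootfs);
}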

/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}

static boolean_t
pool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}


/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to reside in the same pool the
			 * property is being set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
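
/*
 * Illustrative sketch (not part of libzfs): zpool_valid_proplist() is the
 * gatekeeper used by zpool_set_prop(), zpool_create() and
 * zpool_import_props().  A cachefile value, for example, must be empty,
 * "none", or an absolute path whose parent directory exists.
 */
static nvlist_t *
example_validate_cachefile(libzfs_handle_t *hdl, const char *poolname,
    uint64_t version, char *errbuf)
{
	nvlist_t *props, *ret;

	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
		return (NULL);
	if (nvlist_add_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
		nvlist_free(props);
		return (NULL);
	}

	/* Returns a parsed copy on success, or NULL with errbuf filled in. */
	ret = zpool_valid_proplist(hdl, poolname, props, version,
	    B_FALSE, errbuf);
	nvlist_free(props);
	return (ret);
}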

/*
 * Set a zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}


/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

/*
 * Validate the given pool name, optionally recording an extended error
 * message via 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;

			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
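
/*
 * Example (illustrative, not part of libzfs): setting a property through
 * zpool_set_prop().  Values are always passed as strings; numeric and
 * index properties are parsed during validation.
 */
static int
example_enable_autoreplace(zpool_handle_t *zhp)
{
	/* equivalent to: zpool set autoreplace=on <pool> */
	return (zpool_set_prop(zhp, "autoreplace", "on"));
}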

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
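
/*
 * Example (illustrative, not part of libzfs): the usual handle life cycle.
 * zpool_open() refuses FAULTED pools; use zpool_open_canfail() when a
 * faulted pool must still be examined.
 */
static int
example_report_state(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(hdl, pool)) == NULL)
		return (-1);

	/* zpool_get_state() reports ACTIVE or UNAVAIL */
	(void) printf("%s: state %d\n", zpool_get_name(zhp),
	    zpool_get_state(zhp));
	zpool_close(zhp);
	return (0);
}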

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	char *altroot;
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
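
/*
 * Sketch (illustrative, not part of libzfs): callers of zpool_create()
 * supply a pre-built vdev tree.  Building 'nvroot' is normally done by
 * the zpool(1M) command; here we assume the caller constructed it
 * elsewhere, and pass no pool or filesystem properties.
 */
static int
example_create_pool(libzfs_handle_t *hdl, nvlist_t *nvroot)
{
	/* equivalent to: zpool create tank <vdevs> */
	return (zpool_create(hdl, "tank", nvroot, NULL, NULL));
}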

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool cannot have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
	return (zpool_export_common(zhp, force, B_FALSE));
}

int
zpool_export_force(zpool_handle_t *zhp)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
}
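
/*
 * Example (illustrative, not part of libzfs): a plain export versus a
 * forced one.  As zpool_export_common() shows above, 'force' travels in
 * zc_cookie and 'hardforce' in zc_guid.
 */
static int
example_export(zpool_handle_t *zhp, boolean_t hard)
{
	if (hard)
		return (zpool_export_force(zhp));	/* force + hardforce */
	return (zpool_export(zhp, B_FALSE));
}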

/*
 * zpool_import() is a contracted interface.  Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
	if (props)
		nvlist_free(props);
	return (ret);
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, boolean_t importfaulted)
{
	zfs_cmd_t zc = { 0 };
	char *thename;
	char *origname;
	int ret;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, B_TRUE, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = (uint64_t)importfaulted;
	ret = 0;
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];
		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, desc);
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}

/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

	if (errno == EBUSY)
		return (zfs_error(hdl, EZFS_RESILVERING, msg));
	else
		return (zpool_standard_error(hdl, errno, msg));
}
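
/*
 * Example (illustrative, not part of libzfs; assumes the pool_scrub_type_t
 * values POOL_SCRUB_NONE and POOL_SCRUB_EVERYTHING from sys/fs/zfs.h):
 * starting and cancelling a scrub.
 */
static int
example_scrub(zpool_handle_t *zhp, boolean_t stop)
{
	return (zpool_scrub(zhp, stop ? POOL_SCRUB_NONE :
	    POOL_SCRUB_EVERYTHING));
}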

/*
 * Find a vdev that matches the search criteria specified.  We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64: {
		uint64_t srchval, theguid, present;

		verify(nvpair_value_uint64(pair, &srchval) == 0);
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
			    &present) == 0) {
				/*
				 * If the device has never been present since
				 * import, the only reliable way to match the
				 * vdev is by GUID.
				 */
				verify(nvlist_lookup_uint64(nv,
				    ZPOOL_CONFIG_GUID, &theguid) == 0);
				if (theguid == srchval)
					return (nv);
			}
		}
		break;
	}

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value.  We special case the search
		 * for ZPOOL_CONFIG_PATH when it's a wholedisk and when
		 * looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && val) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				/*
				 * For whole disks, the internal path has 's0',
				 * but the path passed in by the user doesn't.
				 */
				if (strlen(srchval) == strlen(val) - 2 &&
				    strncmp(srchval, val, strlen(srchval)) == 0)
					return (nv);
				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}

nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char buf[MAXPATHLEN];
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}

/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}
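
/*
 * Example (illustrative, not part of libzfs): looking up a vdev by the
 * same strings zpool(1M) accepts -- a GUID, a short device name such as
 * "c0t0d0s0", or an interior name such as "mirror-0".
 */
static boolean_t
example_vdev_is_log(zpool_handle_t *zhp, const char *name)
{
	boolean_t avail_spare, l2cache, islog = B_FALSE;

	if (zpool_find_vdev(zhp, name, &avail_spare, &l2cache,
	    &islog) == NULL)
		return (B_FALSE);
	return (islog);
}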

static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}

/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * root pool cannot have EFI labeled disks and can only have
	 * a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}

/*
 * Get phys_path for a root pool
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
	    phypath_size));
}

/*
 * Returns TRUE if the given guid corresponds to the given type.
 * This is used to check for hot spares (INUSE or not), and level 2 cache
 * devices.
 */
static boolean_t
is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
{
	uint64_t target_guid;
	nvlist_t *nvroot;
	nvlist_t **list;
	uint_t count;
	int i;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
			    &target_guid) == 0);
			if (guid == target_guid)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
{
	char path[MAXPATHLEN];
	char errbuf[1024];
	int fd, error;
	int (*_efi_use_whole_disk)(int);

	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
	    "efi_use_whole_disk")) == NULL)
		return (-1);

	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot relabel '%s'"), name);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to open device"), name);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left.  If so, we simply
	 * ignore that error and continue on.
	 */
	error = _efi_use_whole_disk(fd);
	(void) close(fd);
	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to read disk capacity"), name);
		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}
	return (0);
}
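
/*
 * Illustrative note (not part of libzfs): zpool_relabel_disk() resolves
 * efi_use_whole_disk() at run time so that libzfs does not hard-depend on
 * a libefi that may lack the symbol.  The optional-symbol pattern it uses
 * looks like this:
 */
static int
example_optional_symbol(int fd)
{
	int (*fn)(int);

	if ((fn = (int (*)(int))dlsym(RTLD_DEFAULT,
	    "efi_use_whole_disk")) == NULL)
		return (-1);	/* symbol not present; feature unavailable */
	return (fn(fd));
}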

/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare ||
	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		char *pathname = NULL;
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &pathname) == 0);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			pathname += strlen(DISK_ROOT) + 1;
			(void) zpool_relabel_disk(zhp->zpool_hdl, pathname);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
		return (zpool_standard_error(hdl, errno, msg));

	*newstate = zc.zc_cookie;
	return (0);
}

/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare ||
	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}

}
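
/*
 * Example (illustrative, not part of libzfs): temporarily offlining a
 * device and bringing it back online, as "zpool offline -t" followed by
 * "zpool online" would.
 */
static int
example_bounce_device(zpool_handle_t *zhp, const char *path)
{
	vdev_state_t newstate;

	if (zpool_vdev_offline(zhp, path, B_TRUE) != 0)
		return (-1);
	return (zpool_vdev_online(zhp, path, 0, &newstate));
}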

/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *path, *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = pool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
	 * already spared device.
	 */
	if (replacing &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    zpool_find_vdev(zhp, newname, &avail_spare,
	    &l2cache, NULL) != NULL && avail_spare &&
	    is_replacing_spare(config_root, tgt, 0)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device has already been replaced with a spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX - This should be removed once we can
			 * automatically install the bootblocks on the
			 * newly attached disk.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
			    "be sure to invoke %s to make '%s' bootable.\n"),
			    BOOTCMD, new_disk);

			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Remove the given device.  Currently, this is supported only for hot spares,
 * level 2 cache devices and, on pools that have been upgraded to
 * SPA_VERSION_HOLES, top-level log devices (see the version check below).
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	/*
	 * XXX - this should just go away.
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, top-level, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Convert from a devid string to a path.
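 *
 * A hypothetical round trip with path_to_devid() below (the device path is
 * illustrative only):
 *
 *	char *devid, *path;
 *
 *	if ((devid = path_to_devid("/dev/dsk/c0t0d0s0")) != NULL) {
 *		path = devid_to_path(devid);
 *		devid_str_free(devid);
 *	}
 *
 * The returned path is strdup()ed and must be freed by the caller.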
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	/* may be NULL if strdup() fails; free the nmlist in either case */
	path = strdup(list[0].devname);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.
 * We ignore any failure here, since a common case is for an unprivileged user
 * to type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).
 * If the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the
 * vdev.  If 'zhp' is NULL, then this is an exported pool, and we don't need to
 * do any of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
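		 * (The devid verification below is attempted only when the
		 * vdev state is VDEV_STATE_DEGRADED or better, or when no
		 * vdev stats are available at all.)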
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			char *tmp = zfs_strdup(hdl, path);
			if (tmp == NULL)
				return (NULL);
			tmp[strlen(path) - 2] = '\0';
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (verbose) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
			    (u_longlong_t)id);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}

static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of
	 * errors has increased, allocate more space and continue until we get
	 * the entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.
	 * The bookmarks are copied last to first, and 'zc_nvlist_dst_size'
	 * indicates the number of bookmarks _not_ copied as part of the
	 * process.  So we point the start of our array appropriately and
	 * decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlists of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		/*
		 * The name "ejk" is arbitrary; *nverrlistp was allocated
		 * without NV_UNIQUE_NAME, so duplicate names are allowed.
		 */
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

void
zpool_set_history_str(const char *subcommand, int argc, char **argv,
    char *history_str)
{
	int i;

	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
	for (i = 1; i < argc; i++) {
		if (strlen(history_str) + 1 + strlen(argv[i]) >
		    HIS_MAX_RECORD_LEN)
			break;
		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
	}
}

/*
 * Stage command history for logging.
 */
int
zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
{
	if (history_str == NULL)
		return (EINVAL);

	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
		return (EINVAL);

	if (hdl->libzfs_log_str != NULL)
		free(hdl->libzfs_log_str);

	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
		return (no_memory(hdl));

	return (0);
}

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual number of bytes read into 'buf'.
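 *
 * A minimal caller sketch (essentially the loop used by zpool_get_history()
 * below):
 *
 *	uint64_t off = 0, len;
 *	char buf[HIS_BUF_LEN];
 *
 *	do {
 *		len = sizeof (buf);
 *		if (get_history(zhp, buf, &off, &len) != 0)
 *			break;
 *	} while (len != 0);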
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
static int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			nvlist_t **tmp;

			/*
			 * Grow the array as the count approaches the next
			 * power of two; check the realloc() result so the
			 * original array is not leaked on failure.
			 */
			if ((tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *))) == NULL) {
				(*numrecords)--;
				nvlist_free(nv);
				return (ENOMEM);
			}
			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}

#define	HIS_BUF_LEN	(128*1024)

/*
 * Retrieve the command history of a pool.
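 *
 * On success the caller owns the returned nvlist, e.g. (sketch):
 *
 *	nvlist_t *nvhis;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		... process the ZPOOL_HIST_RECORD nvlist array ...
 *		nvlist_free(nvhis);
 *	}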
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    dsobj, obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
	}
	free(mntpnt);
}

/*
 * Read the EFI label from the config.  If a label does not exist then
 * pass back the error to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
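 *
 * For example, find_start_block() below uses it as follows (sketch):
 *
 *	diskaddr_t sb;
 *
 *	if (read_efi_label(config, &sb) >= 0)
 *		... sb now holds the start of the first EFI partition ...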
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * determine where a partition starts on a disk in the current
 * configuration
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}

/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		if (pool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
	    BACKUP_SLICE);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity.
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and give
		 * a specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
	return (0);
}

static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	char *type;
	nvlist_t **child;
	uint_t children, c;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "vdev type '%s' is not supported"), type);
		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
		return (B_FALSE);
	}
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}
	return (B_TRUE);
}

/*
 * Check whether this zvol is allowable for use as a dump device.  Returns
 * zero if it is, > 0 if it isn't, and < 0 if it isn't a zvol at all.
 */
int
zvol_check_dump_config(char *arg)
{
	zpool_handle_t *zhp = NULL;
	nvlist_t *config, *nvroot;
	char *p, *volname;
	nvlist_t **top;
	uint_t toplevels;
	libzfs_handle_t *hdl;
	char errbuf[1024];
	char poolname[ZPOOL_MAXNAMELEN];
	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
	int ret = 1;

	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
		return (-1);
	}

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "dump is not supported on device '%s'"), arg);

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	libzfs_print_on_error(hdl, B_TRUE);

	volname = arg + pathlen;

	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		goto out;
	} else if (p - volname >= ZFS_MAXNAMELEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;
	} else {
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}

	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not open pool '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
		goto out;
	}
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);
	if (toplevels != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "'%s' has multiple top level vdevs"), poolname);
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
		goto out;
	}

	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
		goto out;
	}
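
	/* all configuration checks passed */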
	ret = 0;

out:
	if (zhp)
		zpool_close(zhp);
	libzfs_fini(hdl);
	return (ret);
}
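
/*
 * A hypothetical caller of zvol_check_dump_config() (sketch; the zvol path
 * below is illustrative only):
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/mypool/dumpvol") == 0)
 *		... the zvol may be configured as the dump device ...
 */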