/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <alloca.h>
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <dirent.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#if defined(__i386) || defined(__amd64)
#define	BOOTCMD	"installgrub(1M)"
#else
#define	BOOTCMD	"installboot(1M)"
#endif

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp,
    zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}
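
/*
 * Example (illustrative sketch, not part of this library): read a numeric
 * pool property and note whether the value came from a local setting or
 * the default.  Assumes 'zhp' is a valid handle from zpool_open().
 *
 *	zprop_source_t src;
 *	uint64_t version;
 *
 *	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
 *	(void) printf("version=%llu (%s)\n", (u_longlong_t)version,
 *	    src == ZPROP_SRC_DEFAULT ? "default" : "local");
 */
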
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		if (prop == ZPOOL_PROP_NAME)
			(void) strlcpy(buf, zpool_get_name(zhp), len);
		else if (prop == ZPOOL_PROP_HEALTH)
			(void) strlcpy(buf, "FAULTED", len);
		else
			(void) strlcpy(buf, "-", len);
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_USED:
		case ZPOOL_PROP_AVAILABLE:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

/*
 * Check that the bootfs name has the same pool name as the pool it is
 * being set on.  Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}

static boolean_t
pool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}
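
/*
 * Example (illustrative sketch, not part of this library): fetch a pool
 * property as a formatted string.  'zhp' is assumed to be a valid handle;
 * ZFS_MAXPROPLEN bounds the formatted value.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf,
 *	    sizeof (buf), NULL) == 0)
 *		(void) printf("%s: %s\n", zpool_get_name(zhp), buf);
 */
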
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to be in the same pool it is
			 * being set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Validate the given pool name, optionally reporting an extended error
 * message via 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;

			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
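
/*
 * Example (illustrative sketch, not part of this library): set a pool
 * property from its string form; zpool_set_prop() validates the value and
 * refreshes the cached property list on success.  The property name/value
 * pair here is only an example.
 *
 *	if (zpool_set_prop(zhp, "autoreplace", "on") != 0)
 *		return (-1);
 */
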
/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
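
/*
 * Example (illustrative sketch, not part of this library): typical handle
 * lifecycle.  zpool_open() fails for FAULTED pools; use
 * zpool_open_canfail() when a faulted pool must still be examined.
 * The pool name "tank" is only an example.
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) == NULL)
 *		return (-1);
 *	(void) printf("state: %d\n", zpool_get_state(zhp));
 *	zpool_close(zhp);
 */
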
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	char *altroot;
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
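
/*
 * Example (illustrative sketch, not part of this library): create a pool
 * from an already-built vdev nvlist.  Building 'nvroot' is assumed to have
 * been done by the caller (the zpool command uses make_root_vdev() for
 * this); the pool name is only an example.
 *
 *	if (zpool_create(hdl, "tank", nvroot, NULL, NULL) != 0)
 *		return (-1);
 */
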
/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s]));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}
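
/*
 * Example (illustrative sketch, not part of this library): export a pool.
 * The 'force' flag corresponds to 'zpool export -f' and overrides the
 * active shared-spare check above.
 *
 *	if (zpool_export(zhp, B_FALSE) != 0)
 *		return (-1);
 */
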
/*
 * zpool_import() is a contracted interface.  Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
	if (props)
		nvlist_free(props);
	return (ret);
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, boolean_t importfaulted)
{
	zfs_cmd_t zc = { 0 };
	char *thename;
	char *origname;
	int ret;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, B_TRUE, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = (uint64_t)importfaulted;
	ret = 0;
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];
		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, desc);
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
			ret = -1;
		} else if (zhp != NULL) {
			ret = zpool_create_zvol_links(zhp);
			zpool_close(zhp);
		}
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}

/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

	if (errno == EBUSY)
		return (zfs_error(hdl, EZFS_RESILVERING, msg));
	else
		return (zpool_standard_error(hdl, errno, msg));
}
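
/*
 * Example (illustrative sketch, not part of this library): kick off a full
 * scrub; EBUSY (reported as EZFS_RESILVERING) means a resilver is already
 * in progress.
 *
 *	if (zpool_scrub(zhp, POOL_SCRUB_EVERYTHING) != 0)
 *		return (-1);
 */
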
/*
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	uint64_t theguid, present;
	char *path;
	uint64_t wholedisk = 0;
	nvlist_t *ret;
	uint64_t is_log;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);

	if (search == NULL &&
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
		/*
		 * If the device has never been present since import, the only
		 * reliable way to match the vdev is by GUID.
		 */
		if (theguid == guid)
			return (nv);
	} else if (search != NULL &&
	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (wholedisk) {
			/*
			 * For whole disks, the internal path has 's0', but the
			 * path passed in by the user doesn't.
			 */
			if (strlen(search) == strlen(path) - 2 &&
			    strncmp(search, path, strlen(search)) == 0)
				return (nv);
		} else if (strcmp(search, path) == 0) {
			return (nv);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char buf[MAXPATHLEN];
	const char *search;
	char *end;
	nvlist_t *nvroot;
	uint64_t guid;

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		search = NULL;
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		search = buf;
	} else {
		search = path;
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
	    l2cache, log));
}

static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}
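
/*
 * Example (illustrative sketch, not part of this library): locate a vdev
 * by short device name, GUID string, or full path, and classify it.  The
 * device name is only an example.
 *
 *	boolean_t spare, l2cache, islog;
 *	nvlist_t *tgt;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "c1t0d0", &spare, &l2cache,
 *	    &islog)) == NULL)
 *		return (-1);
 */
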
/*
 * Get phys_path for a root pool.
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath)
{
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	int i;

	/*
	 * Make sure this is a root pool, as phys_path doesn't mean
	 * anything to a non-root pool.
	 */
	if (!pool_is_bootable(zhp))
		return (-1);

	verify(nvlist_lookup_nvlist(zhp->zpool_config,
	    ZPOOL_CONFIG_VDEV_TREE, &vdev_root) == 0);

	if (nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (-2);

	for (i = 0; i < count; i++) {
		nvlist_t **child2;
		uint_t count2;
		char *type;
		char *tmppath;
		int j;

		if (nvlist_lookup_string(child[i], ZPOOL_CONFIG_TYPE, &type)
		    != 0)
			return (-3);

		if (strcmp(type, VDEV_TYPE_DISK) == 0) {
			if (!vdev_online(child[i]))
				return (-8);
			verify(nvlist_lookup_string(child[i],
			    ZPOOL_CONFIG_PHYS_PATH, &tmppath) == 0);
			(void) strncpy(physpath, tmppath, strlen(tmppath));
		} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0) {
			if (nvlist_lookup_nvlist_array(child[i],
			    ZPOOL_CONFIG_CHILDREN, &child2, &count2) != 0)
				return (-4);

			for (j = 0; j < count2; j++) {
				if (!vdev_online(child2[j]))
					return (-8);
				if (nvlist_lookup_string(child2[j],
				    ZPOOL_CONFIG_PHYS_PATH, &tmppath) != 0)
					return (-5);

				if ((strlen(physpath) + strlen(tmppath)) >
				    MAXNAMELEN)
					return (-6);

				if (strlen(physpath) == 0) {
					(void) strncpy(physpath, tmppath,
					    strlen(tmppath));
				} else {
					(void) strcat(physpath, " ");
					(void) strcat(physpath, tmppath);
				}
			}
		} else {
			return (-7);
		}
	}

	return (0);
}

/*
 * Returns TRUE if the given guid corresponds to the given type.
 * This is used to check for hot spares (INUSE or not), and level 2 cache
 * devices.
 */
static boolean_t
is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
{
	uint64_t target_guid;
	nvlist_t *nvroot;
	nvlist_t **list;
	uint_t count;
	int i;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
			    &target_guid) == 0);
			if (guid == target_guid)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}
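
/*
 * Example (illustrative sketch, not part of this library): fetch the
 * physical device path(s) backing a root pool, e.g. for boot setup.  The
 * function expects the caller's buffer to start out empty, and a size of
 * MAXPATHLEN is an assumption here.
 *
 *	char physpath[MAXPATHLEN] = { 0 };
 *
 *	if (zpool_get_physpath(zhp, physpath) != 0)
 *		return (-1);
 */
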
/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare ||
	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
		return (zpool_standard_error(hdl, errno, msg));

	*newstate = zc.zc_cookie;
	return (0);
}

/*
 * Take the specified vdev offline.
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare ||
	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
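
/*
 * Example (illustrative sketch, not part of this library): temporarily
 * offline a disk and bring it back online, checking the resulting state.
 * The device name is only an example.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_offline(zhp, "c1t0d0", B_TRUE) != 0)
 *		return (-1);
 *	if (zpool_vdev_online(zhp, "c1t0d0", 0, &newstate) != 0)
 *		return (-1);
 *	(void) printf("now %s\n", zpool_state_to_name(newstate,
 *	    VDEV_AUX_NONE));
 */
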
/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *path, *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = pool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
	 * already spared device.
	 */
	if (replacing &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    zpool_find_vdev(zhp, newname, &avail_spare,
	    &l2cache, NULL) != NULL && avail_spare &&
	    is_replacing_spare(config_root, tgt, 0)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device has already been replaced with a spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX - This should be removed once we can
			 * automatically install the bootblocks on the
			 * newly attached disk.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
			    "be sure to invoke %s to make '%s' bootable.\n"),
			    BOOTCMD, new_disk);
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
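
/*
 * Example (illustrative sketch, not part of this library): mirror an
 * existing disk by attaching a new one, then later detach it.  The
 * single-disk 'nvroot' describing the new device is assumed to have been
 * built by the caller (the zpool command uses make_root_vdev() for this);
 * the device names are only examples.
 *
 *	if (zpool_vdev_attach(zhp, "c1t0d0", "c2t0d0", nvroot, 0) != 0)
 *		return (-1);
 *	if (zpool_vdev_detach(zhp, "c2t0d0") != 0)
 *		return (-1);
 */
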
/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (!avail_spare && !l2cache) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares or cache devices "
		    "can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
 * hierarchy.
 */
int
zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
    void *data)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char (*paths)[MAXPATHLEN];
	size_t size = 4;
	int curr, fd, base, ret = 0;
	DIR *dirp;
	struct dirent *dp;
	struct stat st;

	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
		return (errno == ENOENT ? 0 : -1);

	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
		int err = errno;
		(void) close(base);
		return (err == ENOENT ? 0 : -1);
	}

	/*
	 * Oddly this wasn't a directory -- ignore that failure since we
	 * know there are no links lower in the (non-existent) hierarchy.
	 */
	if (!S_ISDIR(st.st_mode)) {
		(void) close(base);
		return (0);
	}

	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
		(void) close(base);
		return (-1);
	}

	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
	curr = 0;

	while (curr >= 0) {
		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
			goto err;

		if (S_ISDIR(st.st_mode)) {
			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
				goto err;

			if ((dirp = fdopendir(fd)) == NULL) {
				(void) close(fd);
				goto err;
			}

			while ((dp = readdir(dirp)) != NULL) {
				if (dp->d_name[0] == '.')
					continue;

				if (curr + 1 == size) {
					paths = zfs_realloc(hdl, paths,
					    size * sizeof (paths[0]),
					    size * 2 * sizeof (paths[0]));
					if (paths == NULL) {
						(void) closedir(dirp);
						(void) close(fd);
						goto err;
					}

					size *= 2;
				}

				(void) strlcpy(paths[curr + 1], paths[curr],
				    sizeof (paths[curr + 1]));
				(void) strlcat(paths[curr], "/",
				    sizeof (paths[curr]));
				(void) strlcat(paths[curr], dp->d_name,
				    sizeof (paths[curr]));
				curr++;
			}

			(void) closedir(dirp);

		} else {
			if ((ret = cb(paths[curr], data)) != 0)
				break;
		}

		curr--;
	}

	free(paths);
	(void) close(base);

	return (ret);

err:
	free(paths);
	(void) close(base);
	return (-1);
}

typedef struct zvol_cb {
	zpool_handle_t *zcb_pool;
	boolean_t zcb_create;
} zvol_cb_t;

/*ARGSUSED*/
static int
do_zvol_create(zfs_handle_t *zhp, void *data)
{
	int ret = 0;

	if (ZFS_IS_VOLUME(zhp)) {
		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
	}

	if (ret == 0)
		ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);

	zfs_close(zhp);

	return (ret);
}

/*
 * Iterate over all zvols in the pool and make any necessary minor nodes.
 */
int
zpool_create_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
	    zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);

	zfs_close(zfp);
	return (ret);
}

static int
do_zvol_remove(const char *dataset, void *data)
{
	zpool_handle_t *zhp = data;

	return (zvol_remove_link(zhp->zpool_hdl, dataset));
}

/*
 * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
 * by examining the /dev links so that a corrupted pool doesn't impede this
 * operation.
 */
int
zpool_remove_zvol_links(zpool_handle_t *zhp)
{
	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
}
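
/*
 * Example (illustrative sketch, not part of this library): count the zvols
 * in a pool with a private callback, in the same style as do_zvol_remove()
 * above.
 *
 *	static int
 *	count_zvol(const char *dataset, void *data)
 *	{
 *		(*(int *)data)++;
 *		return (0);
 *	}
 *
 *	int nzvols = 0;
 *	(void) zpool_iter_zvol(zhp, count_zvol, &nzvols);
 */
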
/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	/*
	 * Free the name list whether or not strdup() succeeds; returning
	 * NULL covers both failure cases.
	 */
	path = strdup(list[0].devname);
	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev. We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}
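/*
 * Illustrative sketch (not part of the original source): how the two static
 * helpers above compose into the devid round-trip used by zpool_vdev_name()
 * below. The device path is an assumption made up for the example.
 *
 *	char *devid, *path;
 *
 *	if ((devid = path_to_devid("/dev/dsk/c1t0d0s0")) != NULL) {
 *		path = devid_to_path(devid);
 *		...
 *		free(path);
 *		devid_str_free(devid);
 *	}
 */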
/*
 * Given a vdev, return the name to display in iostat. If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location. The kernel will have opened the device by
 * devid, but the path will still refer to the old location. To catch this, we
 * first do a path -> devid translation (which is fast for the common case). If
 * the devid matches, we're done. If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		/* the device is not present, so fall back to its guid */
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it. Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			/* strip the trailing 's0' slice name */
			char *tmp = zfs_strdup(hdl, path);
			if (tmp == NULL)
				return (NULL);
			tmp[strlen(path) - 2] = '\0';
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}

static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}
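/*
 * Illustrative usage sketch (not part of the original source): printing the
 * display name of every top-level vdev in a pool, the way a status-style
 * consumer would. Error handling is omitted, and 'g_zfs' is an assumed
 * libzfs handle.
 *
 *	nvlist_t *config, *nvroot, **child;
 *	uint_t c, children;
 *	char *name;
 *
 *	config = zpool_get_config(zhp, NULL);
 *	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 *	    &nvroot) == 0);
 *	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &child, &children) == 0);
 *
 *	for (c = 0; c < children; c++) {
 *		name = zpool_vdev_name(g_zfs, zhp, child[c]);
 *		(void) printf("\t%s\n", name);
 *		free(name);
 *	}
 */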
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel. If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks. This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process. So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlists of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

void
zpool_set_history_str(const char *subcommand, int argc, char **argv,
    char *history_str)
{
	int i;

	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
	for (i = 1; i < argc; i++) {
		if (strlen(history_str) + 1 + strlen(argv[i]) >
		    HIS_MAX_RECORD_LEN)
			break;
		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
	}
}

/*
 * Stage command history for logging.
 */
int
zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
{
	if (history_str == NULL)
		return (EINVAL);

	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
		return (EINVAL);

	if (hdl->libzfs_log_str != NULL)
		free(hdl->libzfs_log_str);

	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
		return (no_memory(hdl));

	return (0);
}
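/*
 * Illustrative usage sketch (not part of the original source): walking the
 * error log returned by zpool_get_errlog() above. Each element is an nvlist
 * holding ZPOOL_ERR_DATASET and ZPOOL_ERR_OBJECT, which can then be passed
 * to zpool_obj_to_path() (defined later in this file).
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *	nvlist_t *nv;
 *	uint64_t dsobj, obj;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *			    &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *			    &obj) == 0);
 *			...
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */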
/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual number of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'. 'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
static int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			nvlist_t **tmp;

			/*
			 * Double the array, checking the realloc() result so
			 * a failure doesn't leak the old array.
			 */
			if ((tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *))) == NULL) {
				(*numrecords)--;
				nvlist_free(nv);
				return (ENOMEM);
			}
			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}

#define	HIS_BUF_LEN	(128*1024)
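/*
 * Illustrative sketch (not part of the original source): the layout that
 * zpool_history_unpack() above consumes. Each record is a packed nvlist
 * preceded by its length as a 64-bit little-endian integer, so a buffer
 * holding two records looks like:
 *
 *	+----------+---------------+----------+---------------+
 *	| reclen 0 | packed nvlist | reclen 1 | packed nvlist |
 *	| (8 bytes | (reclen 0     | (8 bytes | (reclen 1     |
 *	|  LE)     |  bytes)       |  LE)     |  bytes)       |
 *	+----------+---------------+----------+---------------+
 *
 * A record whose length prefix or body extends past 'bytes_read' is left
 * for the next call; those trailing bytes are reported via 'leftover'.
 */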
/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (u_longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (u_longlong_t)dsobj, (u_longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (u_longlong_t)obj);
	}
	free(mntpnt);
}
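/*
 * Illustrative usage sketch (not part of the original source): dumping the
 * history records returned by zpool_get_history() above. Only the
 * ZPOOL_HIST_CMD string is shown; records may carry other fields instead.
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t numrecords, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++) {
 *			if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
 *			    &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */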
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128K, so start there instead
 * (256 blocks of 512 bytes is 128K).
 */
#define	NEW_START_BLOCK	256

/*
 * Read the EFI label from the config; if a label does not exist then
 * pass back the error to the caller. If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * Determine where a partition starts on a disk in the current
 * configuration.
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}
/*
 * Label an individual disk. The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		if (pool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
	    BACKUP_SLICE);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		/*
		 * This shouldn't happen. We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity.
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice). V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
	 * etc. were all pretty specific. V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI GPT
		 * labels. Print out a helpful error message directing the
		 * user to manually label the disk and give a specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
	return (0);
}
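/*
 * Illustrative usage sketch (not part of the original source): labeling a
 * fresh disk before building a new pool on it, as 'zpool create' does for
 * whole disks. With a NULL pool handle the data slice starts at
 * NEW_START_BLOCK; 'g_zfs' and the device name are made up for the example.
 *
 *	if (zpool_label_disk(g_zfs, NULL, "c2t1d0") != 0)
 *		return (1);
 */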
static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	char *type;
	nvlist_t **child;
	uint_t children, c;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "vdev type '%s' is not supported"), type);
		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
		return (B_FALSE);
	}
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}
	return (B_TRUE);
}
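/*
 * Illustrative note (not part of the original source): given the checks
 * above, a plain disk or a mirror of disks passes, while raidz, file, log,
 * and missing vdevs are rejected; zvol_check_dump_config() below further
 * requires a single top-level vdev. The pool name is made up here:
 *
 *	zpool create dumppool mirror c1t0d0 c1t1d0	(accepted)
 *	zpool create dumppool raidz c1t0d0 c1t1d0	(rejected)
 */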
/*
 * Check whether this zvol is allowable for use as a dump device; returns
 * zero if it is, > 0 if it isn't, < 0 if it isn't a zvol.
 */
int
zvol_check_dump_config(char *arg)
{
	zpool_handle_t *zhp = NULL;
	nvlist_t *config, *nvroot;
	char *p, *volname;
	nvlist_t **top;
	uint_t toplevels;
	libzfs_handle_t *hdl;
	char errbuf[1024];
	char poolname[ZPOOL_MAXNAMELEN];
	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
	int ret = 1;

	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
		return (-1);
	}

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "dump is not supported on device '%s'"), arg);

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	libzfs_print_on_error(hdl, B_TRUE);

	volname = arg + pathlen;

	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		goto out;
	} else if (p - volname >= ZFS_MAXNAMELEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;
	} else {
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}

	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not open pool '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
		goto out;
	}
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);
	if (toplevels != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "'%s' has multiple top level vdevs"), poolname);
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
		goto out;
	}

	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
		goto out;
	}
	ret = 0;

out:
	if (zhp)
		zpool_close(zhp);
	libzfs_fini(hdl);
	return (ret);
}
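/*
 * Illustrative usage sketch (not part of the original source): how a dump
 * administration utility might call zvol_check_dump_config(). The device
 * path is made up for the example.
 *
 *	char *dev = "/dev/zvol/dsk/tank/dump";
 *
 *	if (zvol_check_dump_config(dev) == 0)
 *		(void) printf("%s is usable as a dump device\n", dev);
 */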