1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 25 * Copyright (c) 2013 by Delphix. All rights reserved. 26 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 27 */ 28 29 #include <ctype.h> 30 #include <errno.h> 31 #include <devid.h> 32 #include <fcntl.h> 33 #include <libintl.h> 34 #include <stdio.h> 35 #include <stdlib.h> 36 #include <strings.h> 37 #include <unistd.h> 38 #include <libgen.h> 39 #include <sys/efi_partition.h> 40 #include <sys/vtoc.h> 41 #include <sys/zfs_ioctl.h> 42 #include <dlfcn.h> 43 44 #include "zfs_namecheck.h" 45 #include "zfs_prop.h" 46 #include "libzfs_impl.h" 47 #include "zfs_comutil.h" 48 #include "zfeature_common.h" 49 50 static int read_efi_label(nvlist_t *config, diskaddr_t *sb); 51 52 #define DISK_ROOT "/dev/dsk" 53 #define RDISK_ROOT "/dev/rdsk" 54 #define BACKUP_SLICE "s2" 55 56 typedef struct prop_flags { 57 int create:1; /* Validate property on creation */ 58 int import:1; /* Validate property on import */ 59 } prop_flags_t; 60 61 /* 62 * ==================================================================== 63 * zpool property functions 64 * ==================================================================== 65 */ 66 67 static int 68 zpool_get_all_props(zpool_handle_t *zhp) 69 { 70 zfs_cmd_t zc = { 0 }; 71 libzfs_handle_t *hdl = zhp->zpool_hdl; 72 73 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 74 75 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 76 return (-1); 77 78 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 79 if (errno == ENOMEM) { 80 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 81 zcmd_free_nvlists(&zc); 82 return (-1); 83 } 84 } else { 85 zcmd_free_nvlists(&zc); 86 return (-1); 87 } 88 } 89 90 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 91 zcmd_free_nvlists(&zc); 92 return (-1); 93 } 94 95 zcmd_free_nvlists(&zc); 96 97 return (0); 98 } 99 100 static int 101 zpool_props_refresh(zpool_handle_t *zhp) 102 { 103 nvlist_t *old_props; 104 105 old_props = zhp->zpool_props; 106 107 if (zpool_get_all_props(zhp) != 0) 108 return (-1); 109 110 nvlist_free(old_props); 111 return (0); 112 } 113 114 static char * 115 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 116 zprop_source_t *src) 117 { 118 nvlist_t *nv, *nvl; 119 uint64_t ival; 120 char *value; 121 zprop_source_t source; 122 123 nvl = zhp->zpool_props; 124 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 125 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 126 source = ival; 127 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 128 } else { 129 
source = ZPROP_SRC_DEFAULT; 130 if ((value = (char *)zpool_prop_default_string(prop)) == NULL) 131 value = "-"; 132 } 133 134 if (src) 135 *src = source; 136 137 return (value); 138 } 139 140 uint64_t 141 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 142 { 143 nvlist_t *nv, *nvl; 144 uint64_t value; 145 zprop_source_t source; 146 147 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 148 /* 149 * zpool_get_all_props() has most likely failed because 150 * the pool is faulted, but if all we need is the top level 151 * vdev's guid then get it from the zhp config nvlist. 152 */ 153 if ((prop == ZPOOL_PROP_GUID) && 154 (nvlist_lookup_nvlist(zhp->zpool_config, 155 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 156 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 157 == 0)) { 158 return (value); 159 } 160 return (zpool_prop_default_numeric(prop)); 161 } 162 163 nvl = zhp->zpool_props; 164 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 165 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 166 source = value; 167 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 168 } else { 169 source = ZPROP_SRC_DEFAULT; 170 value = zpool_prop_default_numeric(prop); 171 } 172 173 if (src) 174 *src = source; 175 176 return (value); 177 } 178 179 /* 180 * Map VDEV STATE to printed strings. 181 */ 182 char * 183 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 184 { 185 switch (state) { 186 case VDEV_STATE_CLOSED: 187 case VDEV_STATE_OFFLINE: 188 return (gettext("OFFLINE")); 189 case VDEV_STATE_REMOVED: 190 return (gettext("REMOVED")); 191 case VDEV_STATE_CANT_OPEN: 192 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 193 return (gettext("FAULTED")); 194 else if (aux == VDEV_AUX_SPLIT_POOL) 195 return (gettext("SPLIT")); 196 else 197 return (gettext("UNAVAIL")); 198 case VDEV_STATE_FAULTED: 199 return (gettext("FAULTED")); 200 case VDEV_STATE_DEGRADED: 201 return (gettext("DEGRADED")); 202 case VDEV_STATE_HEALTHY: 203 return (gettext("ONLINE")); 204 } 205 206 return (gettext("UNKNOWN")); 207 } 208 209 /* 210 * Get a zpool property value for 'prop' and return the value in 211 * a pre-allocated buffer. 
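 *
 * As a purely illustrative sketch (not part of the interface contract), a
 * caller with an open pool handle "zhp" might fetch a value like this:
 *
 *	char buf[ZFS_MAXPROPLEN];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("capacity: %s\n", buf);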
212 */ 213 int 214 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 215 zprop_source_t *srctype, boolean_t literal) 216 { 217 uint64_t intval; 218 const char *strval; 219 zprop_source_t src = ZPROP_SRC_NONE; 220 nvlist_t *nvroot; 221 vdev_stat_t *vs; 222 uint_t vsc; 223 224 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 225 switch (prop) { 226 case ZPOOL_PROP_NAME: 227 (void) strlcpy(buf, zpool_get_name(zhp), len); 228 break; 229 230 case ZPOOL_PROP_HEALTH: 231 (void) strlcpy(buf, "FAULTED", len); 232 break; 233 234 case ZPOOL_PROP_GUID: 235 intval = zpool_get_prop_int(zhp, prop, &src); 236 (void) snprintf(buf, len, "%llu", intval); 237 break; 238 239 case ZPOOL_PROP_ALTROOT: 240 case ZPOOL_PROP_CACHEFILE: 241 case ZPOOL_PROP_COMMENT: 242 if (zhp->zpool_props != NULL || 243 zpool_get_all_props(zhp) == 0) { 244 (void) strlcpy(buf, 245 zpool_get_prop_string(zhp, prop, &src), 246 len); 247 break; 248 } 249 /* FALLTHROUGH */ 250 default: 251 (void) strlcpy(buf, "-", len); 252 break; 253 } 254 255 if (srctype != NULL) 256 *srctype = src; 257 return (0); 258 } 259 260 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 261 prop != ZPOOL_PROP_NAME) 262 return (-1); 263 264 switch (zpool_prop_get_type(prop)) { 265 case PROP_TYPE_STRING: 266 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 267 len); 268 break; 269 270 case PROP_TYPE_NUMBER: 271 intval = zpool_get_prop_int(zhp, prop, &src); 272 273 switch (prop) { 274 case ZPOOL_PROP_SIZE: 275 case ZPOOL_PROP_ALLOCATED: 276 case ZPOOL_PROP_FREE: 277 case ZPOOL_PROP_FREEING: 278 case ZPOOL_PROP_EXPANDSZ: 279 if (literal) { 280 (void) snprintf(buf, len, "%llu", 281 (u_longlong_t)intval); 282 } else { 283 (void) zfs_nicenum(intval, buf, len); 284 } 285 break; 286 287 case ZPOOL_PROP_CAPACITY: 288 if (literal) { 289 (void) snprintf(buf, len, "%llu", 290 (u_longlong_t)intval); 291 } else { 292 (void) snprintf(buf, len, "%llu%%", 293 (u_longlong_t)intval); 294 } 295 break; 296 297 case ZPOOL_PROP_DEDUPRATIO: 298 (void) snprintf(buf, len, "%llu.%02llux", 299 (u_longlong_t)(intval / 100), 300 (u_longlong_t)(intval % 100)); 301 break; 302 303 case ZPOOL_PROP_HEALTH: 304 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 305 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 306 verify(nvlist_lookup_uint64_array(nvroot, 307 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 308 == 0); 309 310 (void) strlcpy(buf, zpool_state_to_name(intval, 311 vs->vs_aux), len); 312 break; 313 case ZPOOL_PROP_VERSION: 314 if (intval >= SPA_VERSION_FEATURES) { 315 (void) snprintf(buf, len, "-"); 316 break; 317 } 318 /* FALLTHROUGH */ 319 default: 320 (void) snprintf(buf, len, "%llu", intval); 321 } 322 break; 323 324 case PROP_TYPE_INDEX: 325 intval = zpool_get_prop_int(zhp, prop, &src); 326 if (zpool_prop_index_to_string(prop, intval, &strval) 327 != 0) 328 return (-1); 329 (void) strlcpy(buf, strval, len); 330 break; 331 332 default: 333 abort(); 334 } 335 336 if (srctype) 337 *srctype = src; 338 339 return (0); 340 } 341 342 /* 343 * Check if the bootfs name has the same pool name as it is set to. 344 * Assuming bootfs is a valid dataset name. 
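 *
 * For example, with pool "tank", a bootfs value of "tank" or "tank/ROOT/fs"
 * passes this check, while "other/ROOT/fs" does not (names are illustrative).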
345 */ 346 static boolean_t 347 bootfs_name_valid(const char *pool, char *bootfs) 348 { 349 int len = strlen(pool); 350 351 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 352 return (B_FALSE); 353 354 if (strncmp(pool, bootfs, len) == 0 && 355 (bootfs[len] == '/' || bootfs[len] == '\0')) 356 return (B_TRUE); 357 358 return (B_FALSE); 359 } 360 361 /* 362 * Inspect the configuration to determine if any of the devices contain 363 * an EFI label. 364 */ 365 static boolean_t 366 pool_uses_efi(nvlist_t *config) 367 { 368 nvlist_t **child; 369 uint_t c, children; 370 371 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 372 &child, &children) != 0) 373 return (read_efi_label(config, NULL) >= 0); 374 375 for (c = 0; c < children; c++) { 376 if (pool_uses_efi(child[c])) 377 return (B_TRUE); 378 } 379 return (B_FALSE); 380 } 381 382 boolean_t 383 zpool_is_bootable(zpool_handle_t *zhp) 384 { 385 char bootfs[ZPOOL_MAXNAMELEN]; 386 387 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 388 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-", 389 sizeof (bootfs)) != 0); 390 } 391 392 393 /* 394 * Given an nvlist of zpool properties to be set, validate that they are 395 * correct, and parse any numeric properties (index, boolean, etc) if they are 396 * specified as strings. 397 */ 398 static nvlist_t * 399 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 400 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 401 { 402 nvpair_t *elem; 403 nvlist_t *retprops; 404 zpool_prop_t prop; 405 char *strval; 406 uint64_t intval; 407 char *slash, *check; 408 struct stat64 statbuf; 409 zpool_handle_t *zhp; 410 nvlist_t *nvroot; 411 412 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 413 (void) no_memory(hdl); 414 return (NULL); 415 } 416 417 elem = NULL; 418 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 419 const char *propname = nvpair_name(elem); 420 421 prop = zpool_name_to_prop(propname); 422 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) { 423 int err; 424 char *fname = strchr(propname, '@') + 1; 425 426 err = zfeature_lookup_name(fname, NULL); 427 if (err != 0) { 428 ASSERT3U(err, ==, ENOENT); 429 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 430 "invalid feature '%s'"), fname); 431 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 432 goto error; 433 } 434 435 if (nvpair_type(elem) != DATA_TYPE_STRING) { 436 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 437 "'%s' must be a string"), propname); 438 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 439 goto error; 440 } 441 442 (void) nvpair_value_string(elem, &strval); 443 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) { 444 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 445 "property '%s' can only be set to " 446 "'enabled'"), propname); 447 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 448 goto error; 449 } 450 451 if (nvlist_add_uint64(retprops, propname, 0) != 0) { 452 (void) no_memory(hdl); 453 goto error; 454 } 455 continue; 456 } 457 458 /* 459 * Make sure this property is valid and applies to this type. 
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the same pool it is being
			 * set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
535 */ 536 if (pool_uses_efi(nvroot)) { 537 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 538 "property '%s' not supported on " 539 "EFI labeled devices"), propname); 540 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf); 541 zpool_close(zhp); 542 goto error; 543 } 544 zpool_close(zhp); 545 break; 546 547 case ZPOOL_PROP_ALTROOT: 548 if (!flags.create && !flags.import) { 549 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 550 "property '%s' can only be set during pool " 551 "creation or import"), propname); 552 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 553 goto error; 554 } 555 556 if (strval[0] != '/') { 557 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 558 "bad alternate root '%s'"), strval); 559 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 560 goto error; 561 } 562 break; 563 564 case ZPOOL_PROP_CACHEFILE: 565 if (strval[0] == '\0') 566 break; 567 568 if (strcmp(strval, "none") == 0) 569 break; 570 571 if (strval[0] != '/') { 572 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 573 "property '%s' must be empty, an " 574 "absolute path, or 'none'"), propname); 575 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 576 goto error; 577 } 578 579 slash = strrchr(strval, '/'); 580 581 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 582 strcmp(slash, "/..") == 0) { 583 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 584 "'%s' is not a valid file"), strval); 585 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 586 goto error; 587 } 588 589 *slash = '\0'; 590 591 if (strval[0] != '\0' && 592 (stat64(strval, &statbuf) != 0 || 593 !S_ISDIR(statbuf.st_mode))) { 594 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 595 "'%s' is not a valid directory"), 596 strval); 597 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 598 goto error; 599 } 600 601 *slash = '/'; 602 break; 603 604 case ZPOOL_PROP_COMMENT: 605 for (check = strval; *check != '\0'; check++) { 606 if (!isprint(*check)) { 607 zfs_error_aux(hdl, 608 dgettext(TEXT_DOMAIN, 609 "comment may only have printable " 610 "characters")); 611 (void) zfs_error(hdl, EZFS_BADPROP, 612 errbuf); 613 goto error; 614 } 615 } 616 if (strlen(strval) > ZPROP_MAX_COMMENT) { 617 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 618 "comment must not exceed %d characters"), 619 ZPROP_MAX_COMMENT); 620 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 621 goto error; 622 } 623 break; 624 case ZPOOL_PROP_READONLY: 625 if (!flags.import) { 626 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 627 "property '%s' can only be set at " 628 "import time"), propname); 629 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 630 goto error; 631 } 632 break; 633 } 634 } 635 636 return (retprops); 637 error: 638 nvlist_free(retprops); 639 return (NULL); 640 } 641 642 /* 643 * Set zpool property : propname=propval. 
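 *
 * For illustration only, a caller would pass both the name and the value as
 * strings, e.g.:
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		return (-1);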
644 */ 645 int 646 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 647 { 648 zfs_cmd_t zc = { 0 }; 649 int ret = -1; 650 char errbuf[1024]; 651 nvlist_t *nvl = NULL; 652 nvlist_t *realprops; 653 uint64_t version; 654 prop_flags_t flags = { 0 }; 655 656 (void) snprintf(errbuf, sizeof (errbuf), 657 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 658 zhp->zpool_name); 659 660 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 661 return (no_memory(zhp->zpool_hdl)); 662 663 if (nvlist_add_string(nvl, propname, propval) != 0) { 664 nvlist_free(nvl); 665 return (no_memory(zhp->zpool_hdl)); 666 } 667 668 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 669 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 670 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 671 nvlist_free(nvl); 672 return (-1); 673 } 674 675 nvlist_free(nvl); 676 nvl = realprops; 677 678 /* 679 * Execute the corresponding ioctl() to set this property. 680 */ 681 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 682 683 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 684 nvlist_free(nvl); 685 return (-1); 686 } 687 688 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 689 690 zcmd_free_nvlists(&zc); 691 nvlist_free(nvl); 692 693 if (ret) 694 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 695 else 696 (void) zpool_props_refresh(zhp); 697 698 return (ret); 699 } 700 701 int 702 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 703 { 704 libzfs_handle_t *hdl = zhp->zpool_hdl; 705 zprop_list_t *entry; 706 char buf[ZFS_MAXPROPLEN]; 707 nvlist_t *features = NULL; 708 zprop_list_t **last; 709 boolean_t firstexpand = (NULL == *plp); 710 711 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 712 return (-1); 713 714 last = plp; 715 while (*last != NULL) 716 last = &(*last)->pl_next; 717 718 if ((*plp)->pl_all) 719 features = zpool_get_features(zhp); 720 721 if ((*plp)->pl_all && firstexpand) { 722 for (int i = 0; i < SPA_FEATURES; i++) { 723 zprop_list_t *entry = zfs_alloc(hdl, 724 sizeof (zprop_list_t)); 725 entry->pl_prop = ZPROP_INVAL; 726 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s", 727 spa_feature_table[i].fi_uname); 728 entry->pl_width = strlen(entry->pl_user_prop); 729 entry->pl_all = B_TRUE; 730 731 *last = entry; 732 last = &entry->pl_next; 733 } 734 } 735 736 /* add any unsupported features */ 737 for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL); 738 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) { 739 char *propname; 740 boolean_t found; 741 zprop_list_t *entry; 742 743 if (zfeature_is_supported(nvpair_name(nvp))) 744 continue; 745 746 propname = zfs_asprintf(hdl, "unsupported@%s", 747 nvpair_name(nvp)); 748 749 /* 750 * Before adding the property to the list make sure that no 751 * other pool already added the same property. 
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zfs_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
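	 * For example, names such as "mirror", "raidz1", "spare1" and "log"
	 * are rejected here when creating or importing a pool.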
879 */ 880 if (ret == 0 && !isopen && 881 (strncmp(pool, "mirror", 6) == 0 || 882 strncmp(pool, "raidz", 5) == 0 || 883 strncmp(pool, "spare", 5) == 0 || 884 strcmp(pool, "log") == 0)) { 885 if (hdl != NULL) 886 zfs_error_aux(hdl, 887 dgettext(TEXT_DOMAIN, "name is reserved")); 888 return (B_FALSE); 889 } 890 891 892 if (ret != 0) { 893 if (hdl != NULL) { 894 switch (why) { 895 case NAME_ERR_TOOLONG: 896 zfs_error_aux(hdl, 897 dgettext(TEXT_DOMAIN, "name is too long")); 898 break; 899 900 case NAME_ERR_INVALCHAR: 901 zfs_error_aux(hdl, 902 dgettext(TEXT_DOMAIN, "invalid character " 903 "'%c' in pool name"), what); 904 break; 905 906 case NAME_ERR_NOLETTER: 907 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 908 "name must begin with a letter")); 909 break; 910 911 case NAME_ERR_RESERVED: 912 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 913 "name is reserved")); 914 break; 915 916 case NAME_ERR_DISKLIKE: 917 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 918 "pool name is reserved")); 919 break; 920 921 case NAME_ERR_LEADING_SLASH: 922 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 923 "leading slash in name")); 924 break; 925 926 case NAME_ERR_EMPTY_COMPONENT: 927 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 928 "empty component in name")); 929 break; 930 931 case NAME_ERR_TRAILING_SLASH: 932 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 933 "trailing slash in name")); 934 break; 935 936 case NAME_ERR_MULTIPLE_AT: 937 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 938 "multiple '@' delimiters in name")); 939 break; 940 941 } 942 } 943 return (B_FALSE); 944 } 945 946 return (B_TRUE); 947 } 948 949 /* 950 * Open a handle to the given pool, even if the pool is currently in the FAULTED 951 * state. 952 */ 953 zpool_handle_t * 954 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 955 { 956 zpool_handle_t *zhp; 957 boolean_t missing; 958 959 /* 960 * Make sure the pool name is valid. 961 */ 962 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 963 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 964 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 965 pool); 966 return (NULL); 967 } 968 969 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 970 return (NULL); 971 972 zhp->zpool_hdl = hdl; 973 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 974 975 if (zpool_refresh_stats(zhp, &missing) != 0) { 976 zpool_close(zhp); 977 return (NULL); 978 } 979 980 if (missing) { 981 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 982 (void) zfs_error_fmt(hdl, EZFS_NOENT, 983 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 984 zpool_close(zhp); 985 return (NULL); 986 } 987 988 return (zhp); 989 } 990 991 /* 992 * Like the above, but silent on error. Used when iterating over pools (because 993 * the configuration cache may be out of date). 994 */ 995 int 996 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 997 { 998 zpool_handle_t *zhp; 999 boolean_t missing; 1000 1001 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1002 return (-1); 1003 1004 zhp->zpool_hdl = hdl; 1005 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1006 1007 if (zpool_refresh_stats(zhp, &missing) != 0) { 1008 zpool_close(zhp); 1009 return (-1); 1010 } 1011 1012 if (missing) { 1013 zpool_close(zhp); 1014 *ret = NULL; 1015 return (0); 1016 } 1017 1018 *ret = zhp; 1019 return (0); 1020 } 1021 1022 /* 1023 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 1024 * state. 
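 *
 * Typical usage is simply (sketch only; "g_zfs" stands for a handle obtained
 * from libzfs_init()):
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(g_zfs, "tank")) == NULL)
 *		return (1);
 *	...
 *	zpool_close(zhp);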
1025 */ 1026 zpool_handle_t * 1027 zpool_open(libzfs_handle_t *hdl, const char *pool) 1028 { 1029 zpool_handle_t *zhp; 1030 1031 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 1032 return (NULL); 1033 1034 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 1035 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 1036 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 1037 zpool_close(zhp); 1038 return (NULL); 1039 } 1040 1041 return (zhp); 1042 } 1043 1044 /* 1045 * Close the handle. Simply frees the memory associated with the handle. 1046 */ 1047 void 1048 zpool_close(zpool_handle_t *zhp) 1049 { 1050 if (zhp->zpool_config) 1051 nvlist_free(zhp->zpool_config); 1052 if (zhp->zpool_old_config) 1053 nvlist_free(zhp->zpool_old_config); 1054 if (zhp->zpool_props) 1055 nvlist_free(zhp->zpool_props); 1056 free(zhp); 1057 } 1058 1059 /* 1060 * Return the name of the pool. 1061 */ 1062 const char * 1063 zpool_get_name(zpool_handle_t *zhp) 1064 { 1065 return (zhp->zpool_name); 1066 } 1067 1068 1069 /* 1070 * Return the state of the pool (ACTIVE or UNAVAILABLE) 1071 */ 1072 int 1073 zpool_get_state(zpool_handle_t *zhp) 1074 { 1075 return (zhp->zpool_state); 1076 } 1077 1078 /* 1079 * Create the named pool, using the provided vdev list. It is assumed 1080 * that the consumer has already validated the contents of the nvlist, so we 1081 * don't have to worry about error semantics. 1082 */ 1083 int 1084 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 1085 nvlist_t *props, nvlist_t *fsprops) 1086 { 1087 zfs_cmd_t zc = { 0 }; 1088 nvlist_t *zc_fsprops = NULL; 1089 nvlist_t *zc_props = NULL; 1090 char msg[1024]; 1091 int ret = -1; 1092 1093 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1094 "cannot create '%s'"), pool); 1095 1096 if (!zpool_name_valid(hdl, B_FALSE, pool)) 1097 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 1098 1099 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1100 return (-1); 1101 1102 if (props) { 1103 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 1104 1105 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 1106 SPA_VERSION_1, flags, msg)) == NULL) { 1107 goto create_failed; 1108 } 1109 } 1110 1111 if (fsprops) { 1112 uint64_t zoned; 1113 char *zonestr; 1114 1115 zoned = ((nvlist_lookup_string(fsprops, 1116 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 1117 strcmp(zonestr, "on") == 0); 1118 1119 if ((zc_fsprops = zfs_valid_proplist(hdl, 1120 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) { 1121 goto create_failed; 1122 } 1123 if (!zc_props && 1124 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 1125 goto create_failed; 1126 } 1127 if (nvlist_add_nvlist(zc_props, 1128 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 1129 goto create_failed; 1130 } 1131 } 1132 1133 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 1134 goto create_failed; 1135 1136 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 1137 1138 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 1139 1140 zcmd_free_nvlists(&zc); 1141 nvlist_free(zc_props); 1142 nvlist_free(zc_fsprops); 1143 1144 switch (errno) { 1145 case EBUSY: 1146 /* 1147 * This can happen if the user has specified the same 1148 * device multiple times. We can't reliably detect this 1149 * until we try to add it and see we already have a 1150 * label. 
1151 */ 1152 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1153 "one or more vdevs refer to the same device")); 1154 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1155 1156 case EOVERFLOW: 1157 /* 1158 * This occurs when one of the devices is below 1159 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1160 * device was the problem device since there's no 1161 * reliable way to determine device size from userland. 1162 */ 1163 { 1164 char buf[64]; 1165 1166 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1167 1168 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1169 "one or more devices is less than the " 1170 "minimum size (%s)"), buf); 1171 } 1172 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1173 1174 case ENOSPC: 1175 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1176 "one or more devices is out of space")); 1177 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1178 1179 case ENOTBLK: 1180 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1181 "cache device must be a disk or disk slice")); 1182 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1183 1184 default: 1185 return (zpool_standard_error(hdl, errno, msg)); 1186 } 1187 } 1188 1189 create_failed: 1190 zcmd_free_nvlists(&zc); 1191 nvlist_free(zc_props); 1192 nvlist_free(zc_fsprops); 1193 return (ret); 1194 } 1195 1196 /* 1197 * Destroy the given pool. It is up to the caller to ensure that there are no 1198 * datasets left in the pool. 1199 */ 1200 int 1201 zpool_destroy(zpool_handle_t *zhp, const char *log_str) 1202 { 1203 zfs_cmd_t zc = { 0 }; 1204 zfs_handle_t *zfp = NULL; 1205 libzfs_handle_t *hdl = zhp->zpool_hdl; 1206 char msg[1024]; 1207 1208 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1209 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1210 return (-1); 1211 1212 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1213 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1214 1215 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1216 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1217 "cannot destroy '%s'"), zhp->zpool_name); 1218 1219 if (errno == EROFS) { 1220 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1221 "one or more devices is read only")); 1222 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1223 } else { 1224 (void) zpool_standard_error(hdl, errno, msg); 1225 } 1226 1227 if (zfp) 1228 zfs_close(zfp); 1229 return (-1); 1230 } 1231 1232 if (zfp) { 1233 remove_mountpoint(zfp); 1234 zfs_close(zfp); 1235 } 1236 1237 return (0); 1238 } 1239 1240 /* 1241 * Add the given vdevs to the pool. The caller must have already performed the 1242 * necessary verification to ensure that the vdev specification is well-formed. 
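 *
 * As a rough, illustrative sketch only, a caller could describe a single
 * additional disk with an nvlist built along these lines (a real consumer
 * such as zpool(1M) performs considerably more setup):
 *
 *	nvlist_t *disk, *nvroot;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c0t1d0s0") == 0);
 *	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *	(void) zpool_add(zhp, nvroot);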
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
%s.\n"), 1446 name, timestr); 1447 } 1448 if (loss > 120) { 1449 (void) printf(dgettext(TEXT_DOMAIN, 1450 "%s approximately %lld "), 1451 dryrun ? "Would discard" : "Discarded", 1452 (loss + 30) / 60); 1453 (void) printf(dgettext(TEXT_DOMAIN, 1454 "minutes of transactions.\n")); 1455 } else if (loss > 0) { 1456 (void) printf(dgettext(TEXT_DOMAIN, 1457 "%s approximately %lld "), 1458 dryrun ? "Would discard" : "Discarded", loss); 1459 (void) printf(dgettext(TEXT_DOMAIN, 1460 "seconds of transactions.\n")); 1461 } 1462 } 1463 } 1464 1465 void 1466 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1467 nvlist_t *config) 1468 { 1469 nvlist_t *nv = NULL; 1470 int64_t loss = -1; 1471 uint64_t edata = UINT64_MAX; 1472 uint64_t rewindto; 1473 struct tm t; 1474 char timestr[128]; 1475 1476 if (!hdl->libzfs_printerr) 1477 return; 1478 1479 if (reason >= 0) 1480 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1481 else 1482 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1483 1484 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1485 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1486 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 || 1487 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1488 goto no_info; 1489 1490 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1491 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1492 &edata); 1493 1494 (void) printf(dgettext(TEXT_DOMAIN, 1495 "Recovery is possible, but will result in some data loss.\n")); 1496 1497 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1498 strftime(timestr, 128, 0, &t) != 0) { 1499 (void) printf(dgettext(TEXT_DOMAIN, 1500 "\tReturning the pool to its state as of %s\n" 1501 "\tshould correct the problem. "), 1502 timestr); 1503 } else { 1504 (void) printf(dgettext(TEXT_DOMAIN, 1505 "\tReverting the pool to an earlier state " 1506 "should correct the problem.\n\t")); 1507 } 1508 1509 if (loss > 120) { 1510 (void) printf(dgettext(TEXT_DOMAIN, 1511 "Approximately %lld minutes of data\n" 1512 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1513 } else if (loss > 0) { 1514 (void) printf(dgettext(TEXT_DOMAIN, 1515 "Approximately %lld seconds of data\n" 1516 "\tmust be discarded, irreversibly. "), loss); 1517 } 1518 if (edata != 0 && edata != UINT64_MAX) { 1519 if (edata == 1) { 1520 (void) printf(dgettext(TEXT_DOMAIN, 1521 "After rewind, at least\n" 1522 "\tone persistent user-data error will remain. ")); 1523 } else { 1524 (void) printf(dgettext(TEXT_DOMAIN, 1525 "After rewind, several\n" 1526 "\tpersistent user-data errors will remain. ")); 1527 } 1528 } 1529 (void) printf(dgettext(TEXT_DOMAIN, 1530 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1531 reason >= 0 ? "clear" : "import", name); 1532 1533 (void) printf(dgettext(TEXT_DOMAIN, 1534 "A scrub of the pool\n" 1535 "\tis strongly recommended after recovery.\n")); 1536 return; 1537 1538 no_info: 1539 (void) printf(dgettext(TEXT_DOMAIN, 1540 "Destroy and re-create the pool from\n\ta backup source.\n")); 1541 } 1542 1543 /* 1544 * zpool_import() is a contracted interface. Should be kept the same 1545 * if possible. 1546 * 1547 * Applications should use zpool_import_props() to import a pool with 1548 * new properties value to be set. 
1549 */ 1550 int 1551 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1552 char *altroot) 1553 { 1554 nvlist_t *props = NULL; 1555 int ret; 1556 1557 if (altroot != NULL) { 1558 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1559 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1560 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1561 newname)); 1562 } 1563 1564 if (nvlist_add_string(props, 1565 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1566 nvlist_add_string(props, 1567 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1568 nvlist_free(props); 1569 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1570 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1571 newname)); 1572 } 1573 } 1574 1575 ret = zpool_import_props(hdl, config, newname, props, 1576 ZFS_IMPORT_NORMAL); 1577 if (props) 1578 nvlist_free(props); 1579 return (ret); 1580 } 1581 1582 static void 1583 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1584 int indent) 1585 { 1586 nvlist_t **child; 1587 uint_t c, children; 1588 char *vname; 1589 uint64_t is_log = 0; 1590 1591 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1592 &is_log); 1593 1594 if (name != NULL) 1595 (void) printf("\t%*s%s%s\n", indent, "", name, 1596 is_log ? " [log]" : ""); 1597 1598 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1599 &child, &children) != 0) 1600 return; 1601 1602 for (c = 0; c < children; c++) { 1603 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1604 print_vdev_tree(hdl, vname, child[c], indent + 2); 1605 free(vname); 1606 } 1607 } 1608 1609 void 1610 zpool_print_unsup_feat(nvlist_t *config) 1611 { 1612 nvlist_t *nvinfo, *unsup_feat; 1613 1614 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 1615 0); 1616 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT, 1617 &unsup_feat) == 0); 1618 1619 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL; 1620 nvp = nvlist_next_nvpair(unsup_feat, nvp)) { 1621 char *desc; 1622 1623 verify(nvpair_type(nvp) == DATA_TYPE_STRING); 1624 verify(nvpair_value_string(nvp, &desc) == 0); 1625 1626 if (strlen(desc) > 0) 1627 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc); 1628 else 1629 (void) printf("\t%s\n", nvpair_name(nvp)); 1630 } 1631 } 1632 1633 /* 1634 * Import the given pool using the known configuration and a list of 1635 * properties to be set. The configuration should have come from 1636 * zpool_find_import(). The 'newname' parameters control whether the pool 1637 * is imported with a different name. 
1638 */ 1639 int 1640 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1641 nvlist_t *props, int flags) 1642 { 1643 zfs_cmd_t zc = { 0 }; 1644 zpool_rewind_policy_t policy; 1645 nvlist_t *nv = NULL; 1646 nvlist_t *nvinfo = NULL; 1647 nvlist_t *missing = NULL; 1648 char *thename; 1649 char *origname; 1650 int ret; 1651 int error = 0; 1652 char errbuf[1024]; 1653 1654 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1655 &origname) == 0); 1656 1657 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1658 "cannot import pool '%s'"), origname); 1659 1660 if (newname != NULL) { 1661 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1662 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1663 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1664 newname)); 1665 thename = (char *)newname; 1666 } else { 1667 thename = origname; 1668 } 1669 1670 if (props) { 1671 uint64_t version; 1672 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1673 1674 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1675 &version) == 0); 1676 1677 if ((props = zpool_valid_proplist(hdl, origname, 1678 props, version, flags, errbuf)) == NULL) { 1679 return (-1); 1680 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1681 nvlist_free(props); 1682 return (-1); 1683 } 1684 } 1685 1686 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1687 1688 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1689 &zc.zc_guid) == 0); 1690 1691 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1692 nvlist_free(props); 1693 return (-1); 1694 } 1695 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1696 nvlist_free(props); 1697 return (-1); 1698 } 1699 1700 zc.zc_cookie = flags; 1701 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1702 errno == ENOMEM) { 1703 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1704 zcmd_free_nvlists(&zc); 1705 return (-1); 1706 } 1707 } 1708 if (ret != 0) 1709 error = errno; 1710 1711 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1712 zpool_get_rewind_policy(config, &policy); 1713 1714 if (error) { 1715 char desc[1024]; 1716 1717 /* 1718 * Dry-run failed, but we print out what success 1719 * looks like if we found a best txg 1720 */ 1721 if (policy.zrp_request & ZPOOL_TRY_REWIND) { 1722 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1723 B_TRUE, nv); 1724 nvlist_free(nv); 1725 return (-1); 1726 } 1727 1728 if (newname == NULL) 1729 (void) snprintf(desc, sizeof (desc), 1730 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1731 thename); 1732 else 1733 (void) snprintf(desc, sizeof (desc), 1734 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1735 origname, thename); 1736 1737 switch (error) { 1738 case ENOTSUP: 1739 if (nv != NULL && nvlist_lookup_nvlist(nv, 1740 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1741 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) { 1742 (void) printf(dgettext(TEXT_DOMAIN, "This " 1743 "pool uses the following feature(s) not " 1744 "supported by this system:\n")); 1745 zpool_print_unsup_feat(nv); 1746 if (nvlist_exists(nvinfo, 1747 ZPOOL_CONFIG_CAN_RDONLY)) { 1748 (void) printf(dgettext(TEXT_DOMAIN, 1749 "All unsupported features are only " 1750 "required for writing to the pool." 1751 "\nThe pool can be imported using " 1752 "'-o readonly=on'.\n")); 1753 } 1754 } 1755 /* 1756 * Unsupported version. 
1757 */ 1758 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1759 break; 1760 1761 case EINVAL: 1762 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1763 break; 1764 1765 case EROFS: 1766 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1767 "one or more devices is read only")); 1768 (void) zfs_error(hdl, EZFS_BADDEV, desc); 1769 break; 1770 1771 case ENXIO: 1772 if (nv && nvlist_lookup_nvlist(nv, 1773 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1774 nvlist_lookup_nvlist(nvinfo, 1775 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1776 (void) printf(dgettext(TEXT_DOMAIN, 1777 "The devices below are missing, use " 1778 "'-m' to import the pool anyway:\n")); 1779 print_vdev_tree(hdl, NULL, missing, 2); 1780 (void) printf("\n"); 1781 } 1782 (void) zpool_standard_error(hdl, error, desc); 1783 break; 1784 1785 case EEXIST: 1786 (void) zpool_standard_error(hdl, error, desc); 1787 break; 1788 1789 default: 1790 (void) zpool_standard_error(hdl, error, desc); 1791 zpool_explain_recover(hdl, 1792 newname ? origname : thename, -error, nv); 1793 break; 1794 } 1795 1796 nvlist_free(nv); 1797 ret = -1; 1798 } else { 1799 zpool_handle_t *zhp; 1800 1801 /* 1802 * This should never fail, but play it safe anyway. 1803 */ 1804 if (zpool_open_silent(hdl, thename, &zhp) != 0) 1805 ret = -1; 1806 else if (zhp != NULL) 1807 zpool_close(zhp); 1808 if (policy.zrp_request & 1809 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 1810 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1811 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv); 1812 } 1813 nvlist_free(nv); 1814 return (0); 1815 } 1816 1817 zcmd_free_nvlists(&zc); 1818 nvlist_free(props); 1819 1820 return (ret); 1821 } 1822 1823 /* 1824 * Scan the pool. 1825 */ 1826 int 1827 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func) 1828 { 1829 zfs_cmd_t zc = { 0 }; 1830 char msg[1024]; 1831 libzfs_handle_t *hdl = zhp->zpool_hdl; 1832 1833 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1834 zc.zc_cookie = func; 1835 1836 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 || 1837 (errno == ENOENT && func != POOL_SCAN_NONE)) 1838 return (0); 1839 1840 if (func == POOL_SCAN_SCRUB) { 1841 (void) snprintf(msg, sizeof (msg), 1842 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name); 1843 } else if (func == POOL_SCAN_NONE) { 1844 (void) snprintf(msg, sizeof (msg), 1845 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 1846 zc.zc_name); 1847 } else { 1848 assert(!"unexpected result"); 1849 } 1850 1851 if (errno == EBUSY) { 1852 nvlist_t *nvroot; 1853 pool_scan_stat_t *ps = NULL; 1854 uint_t psc; 1855 1856 verify(nvlist_lookup_nvlist(zhp->zpool_config, 1857 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1858 (void) nvlist_lookup_uint64_array(nvroot, 1859 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 1860 if (ps && ps->pss_func == POOL_SCAN_SCRUB) 1861 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 1862 else 1863 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 1864 } else if (errno == ENOENT) { 1865 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 1866 } else { 1867 return (zpool_standard_error(hdl, errno, msg)); 1868 } 1869 } 1870 1871 /* 1872 * This provides a very minimal check whether a given string is likely a 1873 * c#t#d# style string. Users of this are expected to do their own 1874 * verification of the s# part. 1875 */ 1876 #define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 1877 1878 /* 1879 * More elaborate version for ones which may start with "/dev/dsk/" 1880 * and the like. 
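 *
 * For example, "/dev/dsk/c0t0d0s0" and "/dev/dsk/c0t0d0s0/old" both pass
 * this check, while a path such as "/dev/dsk/emcpower0a" does not.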
 */
static int
ctd_check_path(char *str) {
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)
				;
		}
		str = tmp + 1;
	}
	return (CTD_CHECK(str));
}

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "s0" or "s0/old". The "s0" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				int slen = strlen(srchval);
				int vlen = strlen(val);

				if (slen != vlen - 2)
					break;

				/*
				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.
				 */
				ASSERT(vlen >= 6);

				/*
				 * strings identical except trailing "s0"
				 */
				if (strcmp(&val[vlen - 2], "s0") == 0 &&
				    strncmp(srchval, val, slen) == 0)
					return (nv);

				/*
				 * strings identical except trailing "s0/old"
				 */
				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)
					return (nv);

				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
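			 * For example, a srchval of "mirror-4" is split
			 * below into the type "mirror" and the vdev id 4.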
2003 */ 2004 if ((type = strdup(srchval)) == NULL) 2005 return (NULL); 2006 2007 if ((p = strrchr(type, '-')) == NULL) { 2008 free(type); 2009 break; 2010 } 2011 idx = p + 1; 2012 *p = '\0'; 2013 2014 /* 2015 * If the types don't match then keep looking. 2016 */ 2017 if (strncmp(val, type, strlen(val)) != 0) { 2018 free(type); 2019 break; 2020 } 2021 2022 verify(strncmp(type, VDEV_TYPE_RAIDZ, 2023 strlen(VDEV_TYPE_RAIDZ)) == 0 || 2024 strncmp(type, VDEV_TYPE_MIRROR, 2025 strlen(VDEV_TYPE_MIRROR)) == 0); 2026 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 2027 &id) == 0); 2028 2029 errno = 0; 2030 vdev_id = strtoull(idx, &end, 10); 2031 2032 free(type); 2033 if (errno != 0) 2034 return (NULL); 2035 2036 /* 2037 * Now verify that we have the correct vdev id. 2038 */ 2039 if (vdev_id == id) 2040 return (nv); 2041 } 2042 2043 /* 2044 * Common case 2045 */ 2046 if (strcmp(srchval, val) == 0) 2047 return (nv); 2048 break; 2049 } 2050 2051 default: 2052 break; 2053 } 2054 2055 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2056 &child, &children) != 0) 2057 return (NULL); 2058 2059 for (c = 0; c < children; c++) { 2060 if ((ret = vdev_to_nvlist_iter(child[c], search, 2061 avail_spare, l2cache, NULL)) != NULL) { 2062 /* 2063 * The 'is_log' value is only set for the toplevel 2064 * vdev, not the leaf vdevs. So we always lookup the 2065 * log device from the root of the vdev tree (where 2066 * 'log' is non-NULL). 2067 */ 2068 if (log != NULL && 2069 nvlist_lookup_uint64(child[c], 2070 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 2071 is_log) { 2072 *log = B_TRUE; 2073 } 2074 return (ret); 2075 } 2076 } 2077 2078 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2079 &child, &children) == 0) { 2080 for (c = 0; c < children; c++) { 2081 if ((ret = vdev_to_nvlist_iter(child[c], search, 2082 avail_spare, l2cache, NULL)) != NULL) { 2083 *avail_spare = B_TRUE; 2084 return (ret); 2085 } 2086 } 2087 } 2088 2089 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2090 &child, &children) == 0) { 2091 for (c = 0; c < children; c++) { 2092 if ((ret = vdev_to_nvlist_iter(child[c], search, 2093 avail_spare, l2cache, NULL)) != NULL) { 2094 *l2cache = B_TRUE; 2095 return (ret); 2096 } 2097 } 2098 } 2099 2100 return (NULL); 2101 } 2102 2103 /* 2104 * Given a physical path (minus the "/devices" prefix), find the 2105 * associated vdev. 2106 */ 2107 nvlist_t * 2108 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 2109 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 2110 { 2111 nvlist_t *search, *nvroot, *ret; 2112 2113 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2114 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 2115 2116 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2117 &nvroot) == 0); 2118 2119 *avail_spare = B_FALSE; 2120 *l2cache = B_FALSE; 2121 if (log != NULL) 2122 *log = B_FALSE; 2123 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2124 nvlist_free(search); 2125 2126 return (ret); 2127 } 2128 2129 /* 2130 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 
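 *
 * For example, "mirror-2" and "raidz1-0" are interior vdev names, while a
 * leaf device name such as "c0t0d0s0" is not.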
2131 */ 2132 boolean_t 2133 zpool_vdev_is_interior(const char *name) 2134 { 2135 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2136 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2137 return (B_TRUE); 2138 return (B_FALSE); 2139 } 2140 2141 nvlist_t * 2142 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2143 boolean_t *l2cache, boolean_t *log) 2144 { 2145 char buf[MAXPATHLEN]; 2146 char *end; 2147 nvlist_t *nvroot, *search, *ret; 2148 uint64_t guid; 2149 2150 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2151 2152 guid = strtoull(path, &end, 10); 2153 if (guid != 0 && *end == '\0') { 2154 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2155 } else if (zpool_vdev_is_interior(path)) { 2156 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2157 } else if (path[0] != '/') { 2158 (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path); 2159 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2160 } else { 2161 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2162 } 2163 2164 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2165 &nvroot) == 0); 2166 2167 *avail_spare = B_FALSE; 2168 *l2cache = B_FALSE; 2169 if (log != NULL) 2170 *log = B_FALSE; 2171 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2172 nvlist_free(search); 2173 2174 return (ret); 2175 } 2176 2177 static int 2178 vdev_online(nvlist_t *nv) 2179 { 2180 uint64_t ival; 2181 2182 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2183 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2184 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2185 return (0); 2186 2187 return (1); 2188 } 2189 2190 /* 2191 * Helper function for zpool_get_physpaths(). 2192 */ 2193 static int 2194 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2195 size_t *bytes_written) 2196 { 2197 size_t bytes_left, pos, rsz; 2198 char *tmppath; 2199 const char *format; 2200 2201 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2202 &tmppath) != 0) 2203 return (EZFS_NODEVICE); 2204 2205 pos = *bytes_written; 2206 bytes_left = physpath_size - pos; 2207 format = (pos == 0) ? "%s" : " %s"; 2208 2209 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2210 *bytes_written += rsz; 2211 2212 if (rsz >= bytes_left) { 2213 /* if physpath was not copied properly, clear it */ 2214 if (bytes_left != 0) { 2215 physpath[pos] = 0; 2216 } 2217 return (EZFS_NOSPC); 2218 } 2219 return (0); 2220 } 2221 2222 static int 2223 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2224 size_t *rsz, boolean_t is_spare) 2225 { 2226 char *type; 2227 int ret; 2228 2229 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2230 return (EZFS_INVALCONFIG); 2231 2232 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2233 /* 2234 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2235 * For a spare vdev, we only want to boot from the active 2236 * spare device. 
2237 */ 2238 if (is_spare) { 2239 uint64_t spare = 0; 2240 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2241 &spare); 2242 if (!spare) 2243 return (EZFS_INVALCONFIG); 2244 } 2245 2246 if (vdev_online(nv)) { 2247 if ((ret = vdev_get_one_physpath(nv, physpath, 2248 phypath_size, rsz)) != 0) 2249 return (ret); 2250 } 2251 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2252 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2253 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2254 nvlist_t **child; 2255 uint_t count; 2256 int i, ret; 2257 2258 if (nvlist_lookup_nvlist_array(nv, 2259 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2260 return (EZFS_INVALCONFIG); 2261 2262 for (i = 0; i < count; i++) { 2263 ret = vdev_get_physpaths(child[i], physpath, 2264 phypath_size, rsz, is_spare); 2265 if (ret == EZFS_NOSPC) 2266 return (ret); 2267 } 2268 } 2269 2270 return (EZFS_POOL_INVALARG); 2271 } 2272 2273 /* 2274 * Get phys_path for a root pool config. 2275 * Return 0 on success; non-zero on failure. 2276 */ 2277 static int 2278 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2279 { 2280 size_t rsz; 2281 nvlist_t *vdev_root; 2282 nvlist_t **child; 2283 uint_t count; 2284 char *type; 2285 2286 rsz = 0; 2287 2288 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2289 &vdev_root) != 0) 2290 return (EZFS_INVALCONFIG); 2291 2292 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2293 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2294 &child, &count) != 0) 2295 return (EZFS_INVALCONFIG); 2296 2297 /* 2298 * root pool can not have EFI labeled disks and can only have 2299 * a single top-level vdev. 2300 */ 2301 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 || 2302 pool_uses_efi(vdev_root)) 2303 return (EZFS_POOL_INVALARG); 2304 2305 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2306 B_FALSE); 2307 2308 /* No online devices */ 2309 if (rsz == 0) 2310 return (EZFS_NODEVICE); 2311 2312 return (0); 2313 } 2314 2315 /* 2316 * Get phys_path for a root pool 2317 * Return 0 on success; non-zero on failure. 2318 */ 2319 int 2320 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2321 { 2322 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2323 phypath_size)); 2324 } 2325 2326 /* 2327 * If the device has being dynamically expanded then we need to relabel 2328 * the disk to use the new unallocated space. 2329 */ 2330 static int 2331 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2332 { 2333 char path[MAXPATHLEN]; 2334 char errbuf[1024]; 2335 int fd, error; 2336 int (*_efi_use_whole_disk)(int); 2337 2338 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2339 "efi_use_whole_disk")) == NULL) 2340 return (-1); 2341 2342 (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name); 2343 2344 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2345 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2346 "relabel '%s': unable to open device"), name); 2347 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2348 } 2349 2350 /* 2351 * It's possible that we might encounter an error if the device 2352 * does not have any unallocated space left. If so, we simply 2353 * ignore that error and continue on. 
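 * (efi_use_whole_disk() reports that case as VT_ENOSPC, which is explicitly
 * ignored below.)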
2354 */ 2355 error = _efi_use_whole_disk(fd); 2356 (void) close(fd); 2357 if (error && error != VT_ENOSPC) { 2358 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2359 "relabel '%s': unable to read disk capacity"), name); 2360 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2361 } 2362 return (0); 2363 } 2364 2365 /* 2366 * Bring the specified vdev online. The 'flags' parameter is a set of the 2367 * ZFS_ONLINE_* flags. 2368 */ 2369 int 2370 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2371 vdev_state_t *newstate) 2372 { 2373 zfs_cmd_t zc = { 0 }; 2374 char msg[1024]; 2375 nvlist_t *tgt; 2376 boolean_t avail_spare, l2cache, islog; 2377 libzfs_handle_t *hdl = zhp->zpool_hdl; 2378 2379 if (flags & ZFS_ONLINE_EXPAND) { 2380 (void) snprintf(msg, sizeof (msg), 2381 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2382 } else { 2383 (void) snprintf(msg, sizeof (msg), 2384 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2385 } 2386 2387 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2388 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2389 &islog)) == NULL) 2390 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2391 2392 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2393 2394 if (avail_spare) 2395 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2396 2397 if (flags & ZFS_ONLINE_EXPAND || 2398 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) { 2399 char *pathname = NULL; 2400 uint64_t wholedisk = 0; 2401 2402 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2403 &wholedisk); 2404 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, 2405 &pathname) == 0); 2406 2407 /* 2408 * XXX - L2ARC 1.0 devices can't support expansion. 2409 */ 2410 if (l2cache) { 2411 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2412 "cannot expand cache devices")); 2413 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2414 } 2415 2416 if (wholedisk) { 2417 pathname += strlen(DISK_ROOT) + 1; 2418 (void) zpool_relabel_disk(hdl, pathname); 2419 } 2420 } 2421 2422 zc.zc_cookie = VDEV_STATE_ONLINE; 2423 zc.zc_obj = flags; 2424 2425 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2426 if (errno == EINVAL) { 2427 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2428 "from this pool into a new one. Use '%s' " 2429 "instead"), "zpool detach"); 2430 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2431 } 2432 return (zpool_standard_error(hdl, errno, msg)); 2433 } 2434 2435 *newstate = zc.zc_cookie; 2436 return (0); 2437 } 2438 2439 /* 2440 * Take the specified vdev offline 2441 */ 2442 int 2443 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2444 { 2445 zfs_cmd_t zc = { 0 }; 2446 char msg[1024]; 2447 nvlist_t *tgt; 2448 boolean_t avail_spare, l2cache; 2449 libzfs_handle_t *hdl = zhp->zpool_hdl; 2450 2451 (void) snprintf(msg, sizeof (msg), 2452 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2453 2454 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2455 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2456 NULL)) == NULL) 2457 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2458 2459 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2460 2461 if (avail_spare) 2462 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2463 2464 zc.zc_cookie = VDEV_STATE_OFFLINE; 2465 zc.zc_obj = istmp ? 
ZFS_OFFLINE_TEMPORARY : 0; 2466 2467 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2468 return (0); 2469 2470 switch (errno) { 2471 case EBUSY: 2472 2473 /* 2474 * There are no other replicas of this device. 2475 */ 2476 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2477 2478 case EEXIST: 2479 /* 2480 * The log device has unplayed logs 2481 */ 2482 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2483 2484 default: 2485 return (zpool_standard_error(hdl, errno, msg)); 2486 } 2487 } 2488 2489 /* 2490 * Mark the given vdev faulted. 2491 */ 2492 int 2493 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2494 { 2495 zfs_cmd_t zc = { 0 }; 2496 char msg[1024]; 2497 libzfs_handle_t *hdl = zhp->zpool_hdl; 2498 2499 (void) snprintf(msg, sizeof (msg), 2500 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2501 2502 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2503 zc.zc_guid = guid; 2504 zc.zc_cookie = VDEV_STATE_FAULTED; 2505 zc.zc_obj = aux; 2506 2507 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2508 return (0); 2509 2510 switch (errno) { 2511 case EBUSY: 2512 2513 /* 2514 * There are no other replicas of this device. 2515 */ 2516 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2517 2518 default: 2519 return (zpool_standard_error(hdl, errno, msg)); 2520 } 2521 2522 } 2523 2524 /* 2525 * Mark the given vdev degraded. 2526 */ 2527 int 2528 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2529 { 2530 zfs_cmd_t zc = { 0 }; 2531 char msg[1024]; 2532 libzfs_handle_t *hdl = zhp->zpool_hdl; 2533 2534 (void) snprintf(msg, sizeof (msg), 2535 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2536 2537 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2538 zc.zc_guid = guid; 2539 zc.zc_cookie = VDEV_STATE_DEGRADED; 2540 zc.zc_obj = aux; 2541 2542 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2543 return (0); 2544 2545 return (zpool_standard_error(hdl, errno, msg)); 2546 } 2547 2548 /* 2549 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2550 * a hot spare. 2551 */ 2552 static boolean_t 2553 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2554 { 2555 nvlist_t **child; 2556 uint_t c, children; 2557 char *type; 2558 2559 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2560 &children) == 0) { 2561 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2562 &type) == 0); 2563 2564 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2565 children == 2 && child[which] == tgt) 2566 return (B_TRUE); 2567 2568 for (c = 0; c < children; c++) 2569 if (is_replacing_spare(child[c], tgt, which)) 2570 return (B_TRUE); 2571 } 2572 2573 return (B_FALSE); 2574 } 2575 2576 /* 2577 * Attach new_disk (fully described by nvroot) to old_disk. 2578 * If 'replacing' is specified, the new disk will replace the old one. 
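 * A minimal caller sketch (illustrative, not part of this file; it assumes
 * 'nvroot' has already been built from the new device's name, which is what
 * zpool(1M) does before calling in here):
 *
 *	if (zpool_vdev_attach(zhp, "c0t0d0s0", "c0t1d0s0", nvroot,
 *	    B_TRUE) != 0)
 *		return (-1);	/* error already reported through 'hdl' */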
2579 */ 2580 int 2581 zpool_vdev_attach(zpool_handle_t *zhp, 2582 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2583 { 2584 zfs_cmd_t zc = { 0 }; 2585 char msg[1024]; 2586 int ret; 2587 nvlist_t *tgt; 2588 boolean_t avail_spare, l2cache, islog; 2589 uint64_t val; 2590 char *newname; 2591 nvlist_t **child; 2592 uint_t children; 2593 nvlist_t *config_root; 2594 libzfs_handle_t *hdl = zhp->zpool_hdl; 2595 boolean_t rootpool = zpool_is_bootable(zhp); 2596 2597 if (replacing) 2598 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2599 "cannot replace %s with %s"), old_disk, new_disk); 2600 else 2601 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2602 "cannot attach %s to %s"), new_disk, old_disk); 2603 2604 /* 2605 * If this is a root pool, make sure that we're not attaching an 2606 * EFI labeled device. 2607 */ 2608 if (rootpool && pool_uses_efi(nvroot)) { 2609 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2610 "EFI labeled devices are not supported on root pools.")); 2611 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 2612 } 2613 2614 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2615 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2616 &islog)) == 0) 2617 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2618 2619 if (avail_spare) 2620 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2621 2622 if (l2cache) 2623 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2624 2625 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2626 zc.zc_cookie = replacing; 2627 2628 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2629 &child, &children) != 0 || children != 1) { 2630 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2631 "new device must be a single disk")); 2632 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2633 } 2634 2635 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2636 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2637 2638 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2639 return (-1); 2640 2641 /* 2642 * If the target is a hot spare that has been swapped in, we can only 2643 * replace it with another hot spare. 2644 */ 2645 if (replacing && 2646 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2647 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2648 NULL) == NULL || !avail_spare) && 2649 is_replacing_spare(config_root, tgt, 1)) { 2650 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2651 "can only be replaced by another hot spare")); 2652 free(newname); 2653 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2654 } 2655 2656 free(newname); 2657 2658 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2659 return (-1); 2660 2661 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2662 2663 zcmd_free_nvlists(&zc); 2664 2665 if (ret == 0) { 2666 if (rootpool) { 2667 /* 2668 * XXX need a better way to prevent user from 2669 * booting up a half-baked vdev. 2670 */ 2671 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2672 "sure to wait until resilver is done " 2673 "before rebooting.\n")); 2674 } 2675 return (0); 2676 } 2677 2678 switch (errno) { 2679 case ENOTSUP: 2680 /* 2681 * Can't attach to or replace this type of vdev. 
2682 */ 2683 if (replacing) { 2684 uint64_t version = zpool_get_prop_int(zhp, 2685 ZPOOL_PROP_VERSION, NULL); 2686 2687 if (islog) 2688 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2689 "cannot replace a log with a spare")); 2690 else if (version >= SPA_VERSION_MULTI_REPLACE) 2691 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2692 "already in replacing/spare config; wait " 2693 "for completion or use 'zpool detach'")); 2694 else 2695 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2696 "cannot replace a replacing device")); 2697 } else { 2698 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2699 "can only attach to mirrors and top-level " 2700 "disks")); 2701 } 2702 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2703 break; 2704 2705 case EINVAL: 2706 /* 2707 * The new device must be a single disk. 2708 */ 2709 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2710 "new device must be a single disk")); 2711 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2712 break; 2713 2714 case EBUSY: 2715 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2716 new_disk); 2717 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2718 break; 2719 2720 case EOVERFLOW: 2721 /* 2722 * The new device is too small. 2723 */ 2724 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2725 "device is too small")); 2726 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2727 break; 2728 2729 case EDOM: 2730 /* 2731 * The new device has a different alignment requirement. 2732 */ 2733 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2734 "devices have different sector alignment")); 2735 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2736 break; 2737 2738 case ENAMETOOLONG: 2739 /* 2740 * The resulting top-level vdev spec won't fit in the label. 2741 */ 2742 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2743 break; 2744 2745 default: 2746 (void) zpool_standard_error(hdl, errno, msg); 2747 } 2748 2749 return (-1); 2750 } 2751 2752 /* 2753 * Detach the specified device. 2754 */ 2755 int 2756 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2757 { 2758 zfs_cmd_t zc = { 0 }; 2759 char msg[1024]; 2760 nvlist_t *tgt; 2761 boolean_t avail_spare, l2cache; 2762 libzfs_handle_t *hdl = zhp->zpool_hdl; 2763 2764 (void) snprintf(msg, sizeof (msg), 2765 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2766 2767 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2768 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2769 NULL)) == 0) 2770 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2771 2772 if (avail_spare) 2773 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2774 2775 if (l2cache) 2776 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2777 2778 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2779 2780 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2781 return (0); 2782 2783 switch (errno) { 2784 2785 case ENOTSUP: 2786 /* 2787 * Can't detach from this type of vdev. 2788 */ 2789 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2790 "applicable to mirror and replacing vdevs")); 2791 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2792 break; 2793 2794 case EBUSY: 2795 /* 2796 * There are no other replicas of this device. 2797 */ 2798 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2799 break; 2800 2801 default: 2802 (void) zpool_standard_error(hdl, errno, msg); 2803 } 2804 2805 return (-1); 2806 } 2807 2808 /* 2809 * Find a mirror vdev in the source nvlist. 2810 * 2811 * The mchild array contains a list of disks in one of the top-level mirrors 2812 * of the source pool. The schild array contains a list of disks that the 2813 * user specified on the command line. 
We loop over the mchild array to 2814 * see if any entry in the schild array matches. 2815 * 2816 * If a disk in the mchild array is found in the schild array, we return 2817 * the index of that entry. Otherwise we return -1. 2818 */ 2819 static int 2820 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2821 nvlist_t **schild, uint_t schildren) 2822 { 2823 uint_t mc; 2824 2825 for (mc = 0; mc < mchildren; mc++) { 2826 uint_t sc; 2827 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2828 mchild[mc], B_FALSE); 2829 2830 for (sc = 0; sc < schildren; sc++) { 2831 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2832 schild[sc], B_FALSE); 2833 boolean_t result = (strcmp(mpath, spath) == 0); 2834 2835 free(spath); 2836 if (result) { 2837 free(mpath); 2838 return (mc); 2839 } 2840 } 2841 2842 free(mpath); 2843 } 2844 2845 return (-1); 2846 } 2847 2848 /* 2849 * Split a mirror pool. If newroot points to null, then a new nvlist 2850 * is generated and it is the responsibility of the caller to free it. 2851 */ 2852 int 2853 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2854 nvlist_t *props, splitflags_t flags) 2855 { 2856 zfs_cmd_t zc = { 0 }; 2857 char msg[1024]; 2858 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2859 nvlist_t **varray = NULL, *zc_props = NULL; 2860 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2861 libzfs_handle_t *hdl = zhp->zpool_hdl; 2862 uint64_t vers; 2863 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2864 int retval = 0; 2865 2866 (void) snprintf(msg, sizeof (msg), 2867 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2868 2869 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2870 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2871 2872 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2873 (void) fprintf(stderr, gettext("Internal error: unable to " 2874 "retrieve pool configuration\n")); 2875 return (-1); 2876 } 2877 2878 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2879 == 0); 2880 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2881 2882 if (props) { 2883 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2884 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2885 props, vers, flags, msg)) == NULL) 2886 return (-1); 2887 } 2888 2889 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2890 &children) != 0) { 2891 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2892 "Source pool is missing vdev tree")); 2893 if (zc_props) 2894 nvlist_free(zc_props); 2895 return (-1); 2896 } 2897 2898 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2899 vcount = 0; 2900 2901 if (*newroot == NULL || 2902 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2903 &newchild, &newchildren) != 0) 2904 newchildren = 0; 2905 2906 for (c = 0; c < children; c++) { 2907 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2908 char *type; 2909 nvlist_t **mchild, *vdev; 2910 uint_t mchildren; 2911 int entry; 2912 2913 /* 2914 * Unlike cache & spares, slogs are stored in the 2915 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2916 */ 2917 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2918 &is_log); 2919 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2920 &is_hole); 2921 if (is_log || is_hole) { 2922 /* 2923 * Create a hole vdev and put it in the config. 
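 * (The hole keeps the remaining top-level vdev ids aligned with the source
 * pool; trailing log/hole entries are trimmed off later via 'lastlog'.)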
2924 */ 2925 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2926 goto out; 2927 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2928 VDEV_TYPE_HOLE) != 0) 2929 goto out; 2930 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2931 1) != 0) 2932 goto out; 2933 if (lastlog == 0) 2934 lastlog = vcount; 2935 varray[vcount++] = vdev; 2936 continue; 2937 } 2938 lastlog = 0; 2939 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2940 == 0); 2941 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2942 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2943 "Source pool must be composed only of mirrors\n")); 2944 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2945 goto out; 2946 } 2947 2948 verify(nvlist_lookup_nvlist_array(child[c], 2949 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2950 2951 /* find or add an entry for this top-level vdev */ 2952 if (newchildren > 0 && 2953 (entry = find_vdev_entry(zhp, mchild, mchildren, 2954 newchild, newchildren)) >= 0) { 2955 /* We found a disk that the user specified. */ 2956 vdev = mchild[entry]; 2957 ++found; 2958 } else { 2959 /* User didn't specify a disk for this vdev. */ 2960 vdev = mchild[mchildren - 1]; 2961 } 2962 2963 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 2964 goto out; 2965 } 2966 2967 /* did we find every disk the user specified? */ 2968 if (found != newchildren) { 2969 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 2970 "include at most one disk from each mirror")); 2971 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2972 goto out; 2973 } 2974 2975 /* Prepare the nvlist for populating. */ 2976 if (*newroot == NULL) { 2977 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 2978 goto out; 2979 freelist = B_TRUE; 2980 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 2981 VDEV_TYPE_ROOT) != 0) 2982 goto out; 2983 } else { 2984 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 2985 } 2986 2987 /* Add all the children we found */ 2988 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 2989 lastlog == 0 ? vcount : lastlog) != 0) 2990 goto out; 2991 2992 /* 2993 * If we're just doing a dry run, exit now with success. 2994 */ 2995 if (flags.dryrun) { 2996 memory_err = B_FALSE; 2997 freelist = B_FALSE; 2998 goto out; 2999 } 3000 3001 /* now build up the config list & call the ioctl */ 3002 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3003 goto out; 3004 3005 if (nvlist_add_nvlist(newconfig, 3006 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3007 nvlist_add_string(newconfig, 3008 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3009 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3010 goto out; 3011 3012 /* 3013 * The new pool is automatically part of the namespace unless we 3014 * explicitly export it. 
3015 */ 3016 if (!flags.import) 3017 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3018 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3019 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3020 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3021 goto out; 3022 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3023 goto out; 3024 3025 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3026 retval = zpool_standard_error(hdl, errno, msg); 3027 goto out; 3028 } 3029 3030 freelist = B_FALSE; 3031 memory_err = B_FALSE; 3032 3033 out: 3034 if (varray != NULL) { 3035 int v; 3036 3037 for (v = 0; v < vcount; v++) 3038 nvlist_free(varray[v]); 3039 free(varray); 3040 } 3041 zcmd_free_nvlists(&zc); 3042 if (zc_props) 3043 nvlist_free(zc_props); 3044 if (newconfig) 3045 nvlist_free(newconfig); 3046 if (freelist) { 3047 nvlist_free(*newroot); 3048 *newroot = NULL; 3049 } 3050 3051 if (retval != 0) 3052 return (retval); 3053 3054 if (memory_err) 3055 return (no_memory(hdl)); 3056 3057 return (0); 3058 } 3059 3060 /* 3061 * Remove the given device. Currently, this is supported only for hot spares 3062 * and level 2 cache devices. 3063 */ 3064 int 3065 zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3066 { 3067 zfs_cmd_t zc = { 0 }; 3068 char msg[1024]; 3069 nvlist_t *tgt; 3070 boolean_t avail_spare, l2cache, islog; 3071 libzfs_handle_t *hdl = zhp->zpool_hdl; 3072 uint64_t version; 3073 3074 (void) snprintf(msg, sizeof (msg), 3075 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3076 3077 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3078 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3079 &islog)) == 0) 3080 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3081 /* 3082 * XXX - this should just go away. 3083 */ 3084 if (!avail_spare && !l2cache && !islog) { 3085 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3086 "only inactive hot spares, cache, top-level, " 3087 "or log devices can be removed")); 3088 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3089 } 3090 3091 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3092 if (islog && version < SPA_VERSION_HOLES) { 3093 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3094 "pool must be upgrade to support log removal")); 3095 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3096 } 3097 3098 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3099 3100 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3101 return (0); 3102 3103 return (zpool_standard_error(hdl, errno, msg)); 3104 } 3105 3106 /* 3107 * Clear the errors for the pool, or the particular device if specified. 3108 */ 3109 int 3110 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3111 { 3112 zfs_cmd_t zc = { 0 }; 3113 char msg[1024]; 3114 nvlist_t *tgt; 3115 zpool_rewind_policy_t policy; 3116 boolean_t avail_spare, l2cache; 3117 libzfs_handle_t *hdl = zhp->zpool_hdl; 3118 nvlist_t *nvi = NULL; 3119 int error; 3120 3121 if (path) 3122 (void) snprintf(msg, sizeof (msg), 3123 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3124 path); 3125 else 3126 (void) snprintf(msg, sizeof (msg), 3127 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3128 zhp->zpool_name); 3129 3130 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3131 if (path) { 3132 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3133 &l2cache, NULL)) == 0) 3134 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3135 3136 /* 3137 * Don't allow error clearing for hot spares. 
Do allow 3138 * error clearing for l2cache devices. 3139 */ 3140 if (avail_spare) 3141 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3142 3143 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3144 &zc.zc_guid) == 0); 3145 } 3146 3147 zpool_get_rewind_policy(rewindnvl, &policy); 3148 zc.zc_cookie = policy.zrp_request; 3149 3150 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3151 return (-1); 3152 3153 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 3154 return (-1); 3155 3156 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3157 errno == ENOMEM) { 3158 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3159 zcmd_free_nvlists(&zc); 3160 return (-1); 3161 } 3162 } 3163 3164 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) && 3165 errno != EPERM && errno != EACCES)) { 3166 if (policy.zrp_request & 3167 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3168 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3169 zpool_rewind_exclaim(hdl, zc.zc_name, 3170 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), 3171 nvi); 3172 nvlist_free(nvi); 3173 } 3174 zcmd_free_nvlists(&zc); 3175 return (0); 3176 } 3177 3178 zcmd_free_nvlists(&zc); 3179 return (zpool_standard_error(hdl, errno, msg)); 3180 } 3181 3182 /* 3183 * Similar to zpool_clear(), but takes a GUID (used by fmd). 3184 */ 3185 int 3186 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3187 { 3188 zfs_cmd_t zc = { 0 }; 3189 char msg[1024]; 3190 libzfs_handle_t *hdl = zhp->zpool_hdl; 3191 3192 (void) snprintf(msg, sizeof (msg), 3193 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3194 guid); 3195 3196 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3197 zc.zc_guid = guid; 3198 zc.zc_cookie = ZPOOL_NO_REWIND; 3199 3200 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3201 return (0); 3202 3203 return (zpool_standard_error(hdl, errno, msg)); 3204 } 3205 3206 /* 3207 * Change the GUID for a pool. 3208 */ 3209 int 3210 zpool_reguid(zpool_handle_t *zhp) 3211 { 3212 char msg[1024]; 3213 libzfs_handle_t *hdl = zhp->zpool_hdl; 3214 zfs_cmd_t zc = { 0 }; 3215 3216 (void) snprintf(msg, sizeof (msg), 3217 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3218 3219 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3220 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3221 return (0); 3222 3223 return (zpool_standard_error(hdl, errno, msg)); 3224 } 3225 3226 /* 3227 * Reopen the pool. 3228 */ 3229 int 3230 zpool_reopen(zpool_handle_t *zhp) 3231 { 3232 zfs_cmd_t zc = { 0 }; 3233 char msg[1024]; 3234 libzfs_handle_t *hdl = zhp->zpool_hdl; 3235 3236 (void) snprintf(msg, sizeof (msg), 3237 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3238 zhp->zpool_name); 3239 3240 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3241 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3242 return (0); 3243 return (zpool_standard_error(hdl, errno, msg)); 3244 } 3245 3246 /* 3247 * Convert from a devid string to a path. 
3248 */ 3249 static char * 3250 devid_to_path(char *devid_str) 3251 { 3252 ddi_devid_t devid; 3253 char *minor; 3254 char *path; 3255 devid_nmlist_t *list = NULL; 3256 int ret; 3257 3258 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3259 return (NULL); 3260 3261 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3262 3263 devid_str_free(minor); 3264 devid_free(devid); 3265 3266 if (ret != 0) 3267 return (NULL); 3268 3269 if ((path = strdup(list[0].devname)) == NULL) 3270 return (NULL); 3271 3272 devid_free_nmlist(list); 3273 3274 return (path); 3275 } 3276 3277 /* 3278 * Convert from a path to a devid string. 3279 */ 3280 static char * 3281 path_to_devid(const char *path) 3282 { 3283 int fd; 3284 ddi_devid_t devid; 3285 char *minor, *ret; 3286 3287 if ((fd = open(path, O_RDONLY)) < 0) 3288 return (NULL); 3289 3290 minor = NULL; 3291 ret = NULL; 3292 if (devid_get(fd, &devid) == 0) { 3293 if (devid_get_minor_name(fd, &minor) == 0) 3294 ret = devid_str_encode(devid, minor); 3295 if (minor != NULL) 3296 devid_str_free(minor); 3297 devid_free(devid); 3298 } 3299 (void) close(fd); 3300 3301 return (ret); 3302 } 3303 3304 /* 3305 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3306 * ignore any failure here, since a common case is for an unprivileged user to 3307 * type 'zpool status', and we'll display the correct information anyway. 3308 */ 3309 static void 3310 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3311 { 3312 zfs_cmd_t zc = { 0 }; 3313 3314 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3315 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3316 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3317 &zc.zc_guid) == 0); 3318 3319 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3320 } 3321 3322 /* 3323 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3324 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3325 * We also check if this is a whole disk, in which case we strip off the 3326 * trailing 's0' slice name. 3327 * 3328 * This routine is also responsible for identifying when disks have been 3329 * reconfigured in a new location. The kernel will have opened the device by 3330 * devid, but the path will still refer to the old location. To catch this, we 3331 * first do a path -> devid translation (which is fast for the common case). If 3332 * the devid matches, we're done. If not, we do a reverse devid -> path 3333 * translation and issue the appropriate ioctl() to update the path of the vdev. 3334 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3335 * of these checks. 3336 */ 3337 char * 3338 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3339 boolean_t verbose) 3340 { 3341 char *path, *devid; 3342 uint64_t value; 3343 char buf[64]; 3344 vdev_stat_t *vs; 3345 uint_t vsc; 3346 3347 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 3348 &value) == 0) { 3349 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3350 &value) == 0); 3351 (void) snprintf(buf, sizeof (buf), "%llu", 3352 (u_longlong_t)value); 3353 path = buf; 3354 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 3355 3356 /* 3357 * If the device is dead (faulted, offline, etc) then don't 3358 * bother opening it. Otherwise we may be forcing the user to 3359 * open a misbehaving device, which can have undesirable 3360 * effects. 
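 * (The devid comparison below is therefore attempted only when the vdev
 * reports a state of DEGRADED or better, or when no stats are available.)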
3361 */ 3362 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3363 (uint64_t **)&vs, &vsc) != 0 || 3364 vs->vs_state >= VDEV_STATE_DEGRADED) && 3365 zhp != NULL && 3366 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3367 /* 3368 * Determine if the current path is correct. 3369 */ 3370 char *newdevid = path_to_devid(path); 3371 3372 if (newdevid == NULL || 3373 strcmp(devid, newdevid) != 0) { 3374 char *newpath; 3375 3376 if ((newpath = devid_to_path(devid)) != NULL) { 3377 /* 3378 * Update the path appropriately. 3379 */ 3380 set_path(zhp, nv, newpath); 3381 if (nvlist_add_string(nv, 3382 ZPOOL_CONFIG_PATH, newpath) == 0) 3383 verify(nvlist_lookup_string(nv, 3384 ZPOOL_CONFIG_PATH, 3385 &path) == 0); 3386 free(newpath); 3387 } 3388 } 3389 3390 if (newdevid) 3391 devid_str_free(newdevid); 3392 } 3393 3394 if (strncmp(path, "/dev/dsk/", 9) == 0) 3395 path += 9; 3396 3397 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3398 &value) == 0 && value) { 3399 int pathlen = strlen(path); 3400 char *tmp = zfs_strdup(hdl, path); 3401 3402 /* 3403 * If it starts with c#, and ends with "s0", chop 3404 * the "s0" off, or if it ends with "s0/old", remove 3405 * the "s0" from the middle. 3406 */ 3407 if (CTD_CHECK(tmp)) { 3408 if (strcmp(&tmp[pathlen - 2], "s0") == 0) { 3409 tmp[pathlen - 2] = '\0'; 3410 } else if (pathlen > 6 && 3411 strcmp(&tmp[pathlen - 6], "s0/old") == 0) { 3412 (void) strcpy(&tmp[pathlen - 6], 3413 "/old"); 3414 } 3415 } 3416 return (tmp); 3417 } 3418 } else { 3419 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3420 3421 /* 3422 * If it's a raidz device, we need to stick in the parity level. 3423 */ 3424 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3425 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3426 &value) == 0); 3427 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3428 (u_longlong_t)value); 3429 path = buf; 3430 } 3431 3432 /* 3433 * We identify each top-level vdev by using a <type-id> 3434 * naming convention. 3435 */ 3436 if (verbose) { 3437 uint64_t id; 3438 3439 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3440 &id) == 0); 3441 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3442 (u_longlong_t)id); 3443 path = buf; 3444 } 3445 } 3446 3447 return (zfs_strdup(hdl, path)); 3448 } 3449 3450 static int 3451 zbookmark_compare(const void *a, const void *b) 3452 { 3453 return (memcmp(a, b, sizeof (zbookmark_t))); 3454 } 3455 3456 /* 3457 * Retrieve the persistent error log, uniquify the members, and return to the 3458 * caller. 3459 */ 3460 int 3461 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3462 { 3463 zfs_cmd_t zc = { 0 }; 3464 uint64_t count; 3465 zbookmark_t *zb = NULL; 3466 int i; 3467 3468 /* 3469 * Retrieve the raw error list from the kernel. If the number of errors 3470 * has increased, allocate more space and continue until we get the 3471 * entire list. 
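 * (ZFS_IOC_ERROR_LOG fails with ENOMEM when the buffer is too small; the
 * kernel passes the required count back in zc_nvlist_dst_size, and the loop
 * below reallocates and retries.)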
3472 */ 3473 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3474 &count) == 0); 3475 if (count == 0) 3476 return (0); 3477 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3478 count * sizeof (zbookmark_t))) == (uintptr_t)NULL) 3479 return (-1); 3480 zc.zc_nvlist_dst_size = count; 3481 (void) strcpy(zc.zc_name, zhp->zpool_name); 3482 for (;;) { 3483 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3484 &zc) != 0) { 3485 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3486 if (errno == ENOMEM) { 3487 count = zc.zc_nvlist_dst_size; 3488 if ((zc.zc_nvlist_dst = (uintptr_t) 3489 zfs_alloc(zhp->zpool_hdl, count * 3490 sizeof (zbookmark_t))) == (uintptr_t)NULL) 3491 return (-1); 3492 } else { 3493 return (-1); 3494 } 3495 } else { 3496 break; 3497 } 3498 } 3499 3500 /* 3501 * Sort the resulting bookmarks. This is a little confusing due to the 3502 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last 3503 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks 3504 * _not_ copied as part of the process. So we point the start of our 3505 * array appropriately and decrement the total number of elements. 3506 */ 3507 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) + 3508 zc.zc_nvlist_dst_size; 3509 count -= zc.zc_nvlist_dst_size; 3510 3511 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare); 3512 3513 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 3514 3515 /* 3516 * Fill in the nverrlistp with nvlists of dataset and object numbers. 3517 */ 3518 for (i = 0; i < count; i++) { 3519 nvlist_t *nv; 3520 3521 /* ignoring zb_blkid and zb_level for now */ 3522 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3523 zb[i-1].zb_object == zb[i].zb_object) 3524 continue; 3525 3526 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3527 goto nomem; 3528 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3529 zb[i].zb_objset) != 0) { 3530 nvlist_free(nv); 3531 goto nomem; 3532 } 3533 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3534 zb[i].zb_object) != 0) { 3535 nvlist_free(nv); 3536 goto nomem; 3537 } 3538 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3539 nvlist_free(nv); 3540 goto nomem; 3541 } 3542 nvlist_free(nv); 3543 } 3544 3545 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3546 return (0); 3547 3548 nomem: 3549 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3550 return (no_memory(zhp->zpool_hdl)); 3551 } 3552 3553 /* 3554 * Upgrade a ZFS pool to the latest on-disk version.
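 * A minimal caller sketch (illustrative; SPA_VERSION is the newest on-disk
 * version the library supports):
 *
 *	uint64_t version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
 *	if (version < SPA_VERSION)
 *		(void) zpool_upgrade(zhp, SPA_VERSION);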
3555 */ 3556 int 3557 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3558 { 3559 zfs_cmd_t zc = { 0 }; 3560 libzfs_handle_t *hdl = zhp->zpool_hdl; 3561 3562 (void) strcpy(zc.zc_name, zhp->zpool_name); 3563 zc.zc_cookie = new_version; 3564 3565 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3566 return (zpool_standard_error_fmt(hdl, errno, 3567 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3568 zhp->zpool_name)); 3569 return (0); 3570 } 3571 3572 void 3573 zfs_save_arguments(int argc, char **argv, char *string, int len) 3574 { 3575 (void) strlcpy(string, basename(argv[0]), len); 3576 for (int i = 1; i < argc; i++) { 3577 (void) strlcat(string, " ", len); 3578 (void) strlcat(string, argv[i], len); 3579 } 3580 } 3581 3582 int 3583 zpool_log_history(libzfs_handle_t *hdl, const char *message) 3584 { 3585 zfs_cmd_t zc = { 0 }; 3586 nvlist_t *args; 3587 int err; 3588 3589 args = fnvlist_alloc(); 3590 fnvlist_add_string(args, "message", message); 3591 err = zcmd_write_src_nvlist(hdl, &zc, args); 3592 if (err == 0) 3593 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc); 3594 nvlist_free(args); 3595 zcmd_free_nvlists(&zc); 3596 return (err); 3597 } 3598 3599 /* 3600 * Perform ioctl to get some command history of a pool. 3601 * 3602 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 3603 * logical offset of the history buffer to start reading from. 3604 * 3605 * Upon return, 'off' is the next logical offset to read from and 3606 * 'len' is the actual amount of bytes read into 'buf'. 3607 */ 3608 static int 3609 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3610 { 3611 zfs_cmd_t zc = { 0 }; 3612 libzfs_handle_t *hdl = zhp->zpool_hdl; 3613 3614 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3615 3616 zc.zc_history = (uint64_t)(uintptr_t)buf; 3617 zc.zc_history_len = *len; 3618 zc.zc_history_offset = *off; 3619 3620 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3621 switch (errno) { 3622 case EPERM: 3623 return (zfs_error_fmt(hdl, EZFS_PERM, 3624 dgettext(TEXT_DOMAIN, 3625 "cannot show history for pool '%s'"), 3626 zhp->zpool_name)); 3627 case ENOENT: 3628 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3629 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3630 "'%s'"), zhp->zpool_name)); 3631 case ENOTSUP: 3632 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3633 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3634 "'%s', pool must be upgraded"), zhp->zpool_name)); 3635 default: 3636 return (zpool_standard_error_fmt(hdl, errno, 3637 dgettext(TEXT_DOMAIN, 3638 "cannot get history for '%s'"), zhp->zpool_name)); 3639 } 3640 } 3641 3642 *len = zc.zc_history_len; 3643 *off = zc.zc_history_offset; 3644 3645 return (0); 3646 } 3647 3648 /* 3649 * Process the buffer of nvlists, unpacking and storing each nvlist record 3650 * into 'records'. 'leftover' is set to the number of bytes that weren't 3651 * processed as there wasn't a complete record. 
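 * Each record in the buffer is a 64-bit little-endian length followed by
 * that many bytes of packed nvlist:
 *
 *	| len0 (8 bytes, LE) | packed nvlist 0 | len1 | packed nvlist 1 | ...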
3652 */ 3653 int 3654 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3655 nvlist_t ***records, uint_t *numrecords) 3656 { 3657 uint64_t reclen; 3658 nvlist_t *nv; 3659 int i; 3660 3661 while (bytes_read > sizeof (reclen)) { 3662 3663 /* get length of packed record (stored as little endian) */ 3664 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3665 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3666 3667 if (bytes_read < sizeof (reclen) + reclen) 3668 break; 3669 3670 /* unpack record */ 3671 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3672 return (ENOMEM); 3673 bytes_read -= sizeof (reclen) + reclen; 3674 buf += sizeof (reclen) + reclen; 3675 3676 /* add record to nvlist array */ 3677 (*numrecords)++; 3678 if (ISP2(*numrecords + 1)) { 3679 *records = realloc(*records, 3680 *numrecords * 2 * sizeof (nvlist_t *)); 3681 } 3682 (*records)[*numrecords - 1] = nv; 3683 } 3684 3685 *leftover = bytes_read; 3686 return (0); 3687 } 3688 3689 #define HIS_BUF_LEN (128*1024) 3690 3691 /* 3692 * Retrieve the command history of a pool. 3693 */ 3694 int 3695 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3696 { 3697 char buf[HIS_BUF_LEN]; 3698 uint64_t off = 0; 3699 nvlist_t **records = NULL; 3700 uint_t numrecords = 0; 3701 int err, i; 3702 3703 do { 3704 uint64_t bytes_read = sizeof (buf); 3705 uint64_t leftover; 3706 3707 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3708 break; 3709 3710 /* if nothing else was read in, we're at EOF, just return */ 3711 if (!bytes_read) 3712 break; 3713 3714 if ((err = zpool_history_unpack(buf, bytes_read, 3715 &leftover, &records, &numrecords)) != 0) 3716 break; 3717 off -= leftover; 3718 3719 /* CONSTCOND */ 3720 } while (1); 3721 3722 if (!err) { 3723 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3724 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3725 records, numrecords) == 0); 3726 } 3727 for (i = 0; i < numrecords; i++) 3728 nvlist_free(records[i]); 3729 free(records); 3730 3731 return (err); 3732 } 3733 3734 void 3735 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3736 char *pathname, size_t len) 3737 { 3738 zfs_cmd_t zc = { 0 }; 3739 boolean_t mounted = B_FALSE; 3740 char *mntpnt = NULL; 3741 char dsname[MAXNAMELEN]; 3742 3743 if (dsobj == 0) { 3744 /* special case for the MOS */ 3745 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3746 return; 3747 } 3748 3749 /* get the dataset's name */ 3750 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3751 zc.zc_obj = dsobj; 3752 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3753 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3754 /* just write out a path of two object numbers */ 3755 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3756 dsobj, obj); 3757 return; 3758 } 3759 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3760 3761 /* find out if the dataset is mounted */ 3762 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3763 3764 /* get the corrupted object's path */ 3765 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3766 zc.zc_obj = obj; 3767 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3768 &zc) == 0) { 3769 if (mounted) { 3770 (void) snprintf(pathname, len, "%s%s", mntpnt, 3771 zc.zc_value); 3772 } else { 3773 (void) snprintf(pathname, len, "%s:%s", 3774 dsname, zc.zc_value); 3775 } 3776 } else { 3777 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3778 } 3779 free(mntpnt); 3780 } 3781 3782 /* 3783 * Read the EFI label from the 
config, if a label does not exist then 3784 * pass back the error to the caller. If the caller has passed a non-NULL 3785 * diskaddr argument then we set it to the starting address of the EFI 3786 * partition. 3787 */ 3788 static int 3789 read_efi_label(nvlist_t *config, diskaddr_t *sb) 3790 { 3791 char *path; 3792 int fd; 3793 char diskname[MAXPATHLEN]; 3794 int err = -1; 3795 3796 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3797 return (err); 3798 3799 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, 3800 strrchr(path, '/')); 3801 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3802 struct dk_gpt *vtoc; 3803 3804 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3805 if (sb != NULL) 3806 *sb = vtoc->efi_parts[0].p_start; 3807 efi_free(vtoc); 3808 } 3809 (void) close(fd); 3810 } 3811 return (err); 3812 } 3813 3814 /* 3815 * determine where a partition starts on a disk in the current 3816 * configuration 3817 */ 3818 static diskaddr_t 3819 find_start_block(nvlist_t *config) 3820 { 3821 nvlist_t **child; 3822 uint_t c, children; 3823 diskaddr_t sb = MAXOFFSET_T; 3824 uint64_t wholedisk; 3825 3826 if (nvlist_lookup_nvlist_array(config, 3827 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3828 if (nvlist_lookup_uint64(config, 3829 ZPOOL_CONFIG_WHOLE_DISK, 3830 &wholedisk) != 0 || !wholedisk) { 3831 return (MAXOFFSET_T); 3832 } 3833 if (read_efi_label(config, &sb) < 0) 3834 sb = MAXOFFSET_T; 3835 return (sb); 3836 } 3837 3838 for (c = 0; c < children; c++) { 3839 sb = find_start_block(child[c]); 3840 if (sb != MAXOFFSET_T) { 3841 return (sb); 3842 } 3843 } 3844 return (MAXOFFSET_T); 3845 } 3846 3847 /* 3848 * Label an individual disk. The name provided is the short name, 3849 * stripped of any leading /dev path. 3850 */ 3851 int 3852 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name) 3853 { 3854 char path[MAXPATHLEN]; 3855 struct dk_gpt *vtoc; 3856 int fd; 3857 size_t resv = EFI_MIN_RESV_SIZE; 3858 uint64_t slice_size; 3859 diskaddr_t start_block; 3860 char errbuf[1024]; 3861 3862 /* prepare an error message just in case */ 3863 (void) snprintf(errbuf, sizeof (errbuf), 3864 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3865 3866 if (zhp) { 3867 nvlist_t *nvroot; 3868 3869 if (zpool_is_bootable(zhp)) { 3870 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3871 "EFI labeled devices are not supported on root " 3872 "pools.")); 3873 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3874 } 3875 3876 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3877 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3878 3879 if (zhp->zpool_start_block == 0) 3880 start_block = find_start_block(nvroot); 3881 else 3882 start_block = zhp->zpool_start_block; 3883 zhp->zpool_start_block = start_block; 3884 } else { 3885 /* new pool */ 3886 start_block = NEW_START_BLOCK; 3887 } 3888 3889 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, 3890 BACKUP_SLICE); 3891 3892 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3893 /* 3894 * This shouldn't happen. We've long since verified that this 3895 * is a valid device. 
3896 */ 3897 zfs_error_aux(hdl, 3898 dgettext(TEXT_DOMAIN, "unable to open device")); 3899 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 3900 } 3901 3902 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 3903 /* 3904 * The only way this can fail is if we run out of memory, or we 3905 * were unable to read the disk's capacity 3906 */ 3907 if (errno == ENOMEM) 3908 (void) no_memory(hdl); 3909 3910 (void) close(fd); 3911 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3912 "unable to read disk capacity"), name); 3913 3914 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 3915 } 3916 3917 slice_size = vtoc->efi_last_u_lba + 1; 3918 slice_size -= EFI_MIN_RESV_SIZE; 3919 if (start_block == MAXOFFSET_T) 3920 start_block = NEW_START_BLOCK; 3921 slice_size -= start_block; 3922 3923 vtoc->efi_parts[0].p_start = start_block; 3924 vtoc->efi_parts[0].p_size = slice_size; 3925 3926 /* 3927 * Why we use V_USR: V_BACKUP confuses users, and is considered 3928 * disposable by some EFI utilities (since EFI doesn't have a backup 3929 * slice). V_UNASSIGNED is supposed to be used only for zero size 3930 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 3931 * etc. were all pretty specific. V_USR is as close to reality as we 3932 * can get, in the absence of V_OTHER. 3933 */ 3934 vtoc->efi_parts[0].p_tag = V_USR; 3935 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 3936 3937 vtoc->efi_parts[8].p_start = slice_size + start_block; 3938 vtoc->efi_parts[8].p_size = resv; 3939 vtoc->efi_parts[8].p_tag = V_RESERVED; 3940 3941 if (efi_write(fd, vtoc) != 0) { 3942 /* 3943 * Some block drivers (like pcata) may not support EFI 3944 * GPT labels. Print out a helpful error message dir- 3945 * ecting the user to manually label the disk and give 3946 * a specific slice. 3947 */ 3948 (void) close(fd); 3949 efi_free(vtoc); 3950 3951 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3952 "try using fdisk(1M) and then provide a specific slice")); 3953 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 3954 } 3955 3956 (void) close(fd); 3957 efi_free(vtoc); 3958 return (0); 3959 } 3960 3961 static boolean_t 3962 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 3963 { 3964 char *type; 3965 nvlist_t **child; 3966 uint_t children, c; 3967 3968 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 3969 if (strcmp(type, VDEV_TYPE_FILE) == 0 || 3970 strcmp(type, VDEV_TYPE_HOLE) == 0 || 3971 strcmp(type, VDEV_TYPE_MISSING) == 0) { 3972 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3973 "vdev type '%s' is not supported"), type); 3974 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 3975 return (B_FALSE); 3976 } 3977 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 3978 &child, &children) == 0) { 3979 for (c = 0; c < children; c++) { 3980 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 3981 return (B_FALSE); 3982 } 3983 } 3984 return (B_TRUE); 3985 } 3986 3987 /* 3988 * Check if this zvol is allowable for use as a dump device; zero if 3989 * it is, > 0 if it isn't, < 0 if it isn't a zvol. 3990 * 3991 * Allowable storage configurations include mirrors, all raidz variants, and 3992 * pools with log, cache, and spare devices. Pools which are backed by files or 3993 * have missing/hole vdevs are not suitable. 
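 * For example (illustrative), an 'arg' beginning with ZVOL_FULL_DEV_DIR and
 * ending in "rpool/dump" names the zvol 'rpool/dump', and the vdev
 * configuration of pool 'rpool' is what gets checked.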
3994 */ 3995 int 3996 zvol_check_dump_config(char *arg) 3997 { 3998 zpool_handle_t *zhp = NULL; 3999 nvlist_t *config, *nvroot; 4000 char *p, *volname; 4001 nvlist_t **top; 4002 uint_t toplevels; 4003 libzfs_handle_t *hdl; 4004 char errbuf[1024]; 4005 char poolname[ZPOOL_MAXNAMELEN]; 4006 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4007 int ret = 1; 4008 4009 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4010 return (-1); 4011 } 4012 4013 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4014 "dump is not supported on device '%s'"), arg); 4015 4016 if ((hdl = libzfs_init()) == NULL) 4017 return (1); 4018 libzfs_print_on_error(hdl, B_TRUE); 4019 4020 volname = arg + pathlen; 4021 4022 /* check the configuration of the pool */ 4023 if ((p = strchr(volname, '/')) == NULL) { 4024 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4025 "malformed dataset name")); 4026 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4027 return (1); 4028 } else if (p - volname >= ZFS_MAXNAMELEN) { 4029 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4030 "dataset name is too long")); 4031 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4032 return (1); 4033 } else { 4034 (void) strncpy(poolname, volname, p - volname); 4035 poolname[p - volname] = '\0'; 4036 } 4037 4038 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4039 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4040 "could not open pool '%s'"), poolname); 4041 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4042 goto out; 4043 } 4044 config = zpool_get_config(zhp, NULL); 4045 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4046 &nvroot) != 0) { 4047 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4048 "could not obtain vdev configuration for '%s'"), poolname); 4049 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4050 goto out; 4051 } 4052 4053 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4054 &top, &toplevels) == 0); 4055 4056 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4057 goto out; 4058 } 4059 ret = 0; 4060 4061 out: 4062 if (zhp) 4063 zpool_close(zhp); 4064 libzfs_fini(hdl); 4065 return (ret); 4066 } 4067
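/*
 * Illustrative usage sketch (not part of libzfs; the pool name "tank" and
 * the device "c0t1d0" are placeholders). It shows the typical flow of
 * locating a leaf vdev and driving it through the online/offline entry
 * points defined above:
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zpool_handle_t *zhp;
 *	boolean_t spare, l2cache;
 *	vdev_state_t newstate;
 *
 *	if (hdl != NULL && (zhp = zpool_open(hdl, "tank")) != NULL) {
 *		if (zpool_find_vdev(zhp, "c0t1d0", &spare, &l2cache,
 *		    NULL) != NULL && !spare && !l2cache) {
 *			(void) zpool_vdev_online(zhp, "c0t1d0",
 *			    ZFS_ONLINE_EXPAND, &newstate);
 *			(void) zpool_vdev_offline(zhp, "c0t1d0", B_TRUE);
 *		}
 *		zpool_close(zhp);
 *	}
 *	if (hdl != NULL)
 *		libzfs_fini(hdl);
 */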