1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 25 * Copyright (c) 2012, 2014 by Delphix. All rights reserved. 26 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 27 */ 28 29 #include <ctype.h> 30 #include <errno.h> 31 #include <devid.h> 32 #include <fcntl.h> 33 #include <libintl.h> 34 #include <stdio.h> 35 #include <stdlib.h> 36 #include <strings.h> 37 #include <unistd.h> 38 #include <libgen.h> 39 #include <sys/efi_partition.h> 40 #include <sys/vtoc.h> 41 #include <sys/zfs_ioctl.h> 42 #include <dlfcn.h> 43 44 #include "zfs_namecheck.h" 45 #include "zfs_prop.h" 46 #include "libzfs_impl.h" 47 #include "zfs_comutil.h" 48 #include "zfeature_common.h" 49 50 static int read_efi_label(nvlist_t *config, diskaddr_t *sb); 51 52 #define DISK_ROOT "/dev/dsk" 53 #define RDISK_ROOT "/dev/rdsk" 54 #define BACKUP_SLICE "s2" 55 56 typedef struct prop_flags { 57 int create:1; /* Validate property on creation */ 58 int import:1; /* Validate property on import */ 59 } prop_flags_t; 60 61 /* 62 * ==================================================================== 63 * zpool property functions 64 * ==================================================================== 65 */ 66 67 static int 68 zpool_get_all_props(zpool_handle_t *zhp) 69 { 70 zfs_cmd_t zc = { 0 }; 71 libzfs_handle_t *hdl = zhp->zpool_hdl; 72 73 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 74 75 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 76 return (-1); 77 78 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 79 if (errno == ENOMEM) { 80 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 81 zcmd_free_nvlists(&zc); 82 return (-1); 83 } 84 } else { 85 zcmd_free_nvlists(&zc); 86 return (-1); 87 } 88 } 89 90 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 91 zcmd_free_nvlists(&zc); 92 return (-1); 93 } 94 95 zcmd_free_nvlists(&zc); 96 97 return (0); 98 } 99 100 static int 101 zpool_props_refresh(zpool_handle_t *zhp) 102 { 103 nvlist_t *old_props; 104 105 old_props = zhp->zpool_props; 106 107 if (zpool_get_all_props(zhp) != 0) 108 return (-1); 109 110 nvlist_free(old_props); 111 return (0); 112 } 113 114 static char * 115 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 116 zprop_source_t *src) 117 { 118 nvlist_t *nv, *nvl; 119 uint64_t ival; 120 char *value; 121 zprop_source_t source; 122 123 nvl = zhp->zpool_props; 124 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 125 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 126 source = ival; 127 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 128 } else { 
129 source = ZPROP_SRC_DEFAULT; 130 if ((value = (char *)zpool_prop_default_string(prop)) == NULL) 131 value = "-"; 132 } 133 134 if (src) 135 *src = source; 136 137 return (value); 138 } 139 140 uint64_t 141 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 142 { 143 nvlist_t *nv, *nvl; 144 uint64_t value; 145 zprop_source_t source; 146 147 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 148 /* 149 * zpool_get_all_props() has most likely failed because 150 * the pool is faulted, but if all we need is the top level 151 * vdev's guid then get it from the zhp config nvlist. 152 */ 153 if ((prop == ZPOOL_PROP_GUID) && 154 (nvlist_lookup_nvlist(zhp->zpool_config, 155 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 156 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 157 == 0)) { 158 return (value); 159 } 160 return (zpool_prop_default_numeric(prop)); 161 } 162 163 nvl = zhp->zpool_props; 164 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 165 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 166 source = value; 167 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 168 } else { 169 source = ZPROP_SRC_DEFAULT; 170 value = zpool_prop_default_numeric(prop); 171 } 172 173 if (src) 174 *src = source; 175 176 return (value); 177 } 178 179 /* 180 * Map VDEV STATE to printed strings. 181 */ 182 char * 183 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 184 { 185 switch (state) { 186 case VDEV_STATE_CLOSED: 187 case VDEV_STATE_OFFLINE: 188 return (gettext("OFFLINE")); 189 case VDEV_STATE_REMOVED: 190 return (gettext("REMOVED")); 191 case VDEV_STATE_CANT_OPEN: 192 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 193 return (gettext("FAULTED")); 194 else if (aux == VDEV_AUX_SPLIT_POOL) 195 return (gettext("SPLIT")); 196 else 197 return (gettext("UNAVAIL")); 198 case VDEV_STATE_FAULTED: 199 return (gettext("FAULTED")); 200 case VDEV_STATE_DEGRADED: 201 return (gettext("DEGRADED")); 202 case VDEV_STATE_HEALTHY: 203 return (gettext("ONLINE")); 204 } 205 206 return (gettext("UNKNOWN")); 207 } 208 209 /* 210 * Get a zpool property value for 'prop' and return the value in 211 * a pre-allocated buffer. 
212 */ 213 int 214 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 215 zprop_source_t *srctype, boolean_t literal) 216 { 217 uint64_t intval; 218 const char *strval; 219 zprop_source_t src = ZPROP_SRC_NONE; 220 nvlist_t *nvroot; 221 vdev_stat_t *vs; 222 uint_t vsc; 223 224 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 225 switch (prop) { 226 case ZPOOL_PROP_NAME: 227 (void) strlcpy(buf, zpool_get_name(zhp), len); 228 break; 229 230 case ZPOOL_PROP_HEALTH: 231 (void) strlcpy(buf, "FAULTED", len); 232 break; 233 234 case ZPOOL_PROP_GUID: 235 intval = zpool_get_prop_int(zhp, prop, &src); 236 (void) snprintf(buf, len, "%llu", intval); 237 break; 238 239 case ZPOOL_PROP_ALTROOT: 240 case ZPOOL_PROP_CACHEFILE: 241 case ZPOOL_PROP_COMMENT: 242 if (zhp->zpool_props != NULL || 243 zpool_get_all_props(zhp) == 0) { 244 (void) strlcpy(buf, 245 zpool_get_prop_string(zhp, prop, &src), 246 len); 247 break; 248 } 249 /* FALLTHROUGH */ 250 default: 251 (void) strlcpy(buf, "-", len); 252 break; 253 } 254 255 if (srctype != NULL) 256 *srctype = src; 257 return (0); 258 } 259 260 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 261 prop != ZPOOL_PROP_NAME) 262 return (-1); 263 264 switch (zpool_prop_get_type(prop)) { 265 case PROP_TYPE_STRING: 266 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 267 len); 268 break; 269 270 case PROP_TYPE_NUMBER: 271 intval = zpool_get_prop_int(zhp, prop, &src); 272 273 switch (prop) { 274 case ZPOOL_PROP_SIZE: 275 case ZPOOL_PROP_ALLOCATED: 276 case ZPOOL_PROP_FREE: 277 case ZPOOL_PROP_FREEING: 278 case ZPOOL_PROP_LEAKED: 279 case ZPOOL_PROP_EXPANDSZ: 280 if (literal) { 281 (void) snprintf(buf, len, "%llu", 282 (u_longlong_t)intval); 283 } else { 284 (void) zfs_nicenum(intval, buf, len); 285 } 286 break; 287 288 case ZPOOL_PROP_CAPACITY: 289 if (literal) { 290 (void) snprintf(buf, len, "%llu", 291 (u_longlong_t)intval); 292 } else { 293 (void) snprintf(buf, len, "%llu%%", 294 (u_longlong_t)intval); 295 } 296 break; 297 case ZPOOL_PROP_FRAGMENTATION: 298 if (intval == UINT64_MAX) { 299 (void) strlcpy(buf, "-", len); 300 } else { 301 (void) snprintf(buf, len, "%llu%%", 302 (u_longlong_t)intval); 303 } 304 break; 305 306 case ZPOOL_PROP_DEDUPRATIO: 307 (void) snprintf(buf, len, "%llu.%02llux", 308 (u_longlong_t)(intval / 100), 309 (u_longlong_t)(intval % 100)); 310 break; 311 312 case ZPOOL_PROP_HEALTH: 313 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 314 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 315 verify(nvlist_lookup_uint64_array(nvroot, 316 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 317 == 0); 318 319 (void) strlcpy(buf, zpool_state_to_name(intval, 320 vs->vs_aux), len); 321 break; 322 case ZPOOL_PROP_VERSION: 323 if (intval >= SPA_VERSION_FEATURES) { 324 (void) snprintf(buf, len, "-"); 325 break; 326 } 327 /* FALLTHROUGH */ 328 default: 329 (void) snprintf(buf, len, "%llu", intval); 330 } 331 break; 332 333 case PROP_TYPE_INDEX: 334 intval = zpool_get_prop_int(zhp, prop, &src); 335 if (zpool_prop_index_to_string(prop, intval, &strval) 336 != 0) 337 return (-1); 338 (void) strlcpy(buf, strval, len); 339 break; 340 341 default: 342 abort(); 343 } 344 345 if (srctype) 346 *srctype = src; 347 348 return (0); 349 } 350 351 /* 352 * Check if the bootfs name has the same pool name as it is set to. 353 * Assuming bootfs is a valid dataset name. 
354 */ 355 static boolean_t 356 bootfs_name_valid(const char *pool, char *bootfs) 357 { 358 int len = strlen(pool); 359 360 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 361 return (B_FALSE); 362 363 if (strncmp(pool, bootfs, len) == 0 && 364 (bootfs[len] == '/' || bootfs[len] == '\0')) 365 return (B_TRUE); 366 367 return (B_FALSE); 368 } 369 370 /* 371 * Inspect the configuration to determine if any of the devices contain 372 * an EFI label. 373 */ 374 static boolean_t 375 pool_uses_efi(nvlist_t *config) 376 { 377 nvlist_t **child; 378 uint_t c, children; 379 380 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 381 &child, &children) != 0) 382 return (read_efi_label(config, NULL) >= 0); 383 384 for (c = 0; c < children; c++) { 385 if (pool_uses_efi(child[c])) 386 return (B_TRUE); 387 } 388 return (B_FALSE); 389 } 390 391 boolean_t 392 zpool_is_bootable(zpool_handle_t *zhp) 393 { 394 char bootfs[ZPOOL_MAXNAMELEN]; 395 396 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 397 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-", 398 sizeof (bootfs)) != 0); 399 } 400 401 402 /* 403 * Given an nvlist of zpool properties to be set, validate that they are 404 * correct, and parse any numeric properties (index, boolean, etc) if they are 405 * specified as strings. 406 */ 407 static nvlist_t * 408 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 409 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 410 { 411 nvpair_t *elem; 412 nvlist_t *retprops; 413 zpool_prop_t prop; 414 char *strval; 415 uint64_t intval; 416 char *slash, *check; 417 struct stat64 statbuf; 418 zpool_handle_t *zhp; 419 nvlist_t *nvroot; 420 421 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 422 (void) no_memory(hdl); 423 return (NULL); 424 } 425 426 elem = NULL; 427 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 428 const char *propname = nvpair_name(elem); 429 430 prop = zpool_name_to_prop(propname); 431 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) { 432 int err; 433 char *fname = strchr(propname, '@') + 1; 434 435 err = zfeature_lookup_name(fname, NULL); 436 if (err != 0) { 437 ASSERT3U(err, ==, ENOENT); 438 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 439 "invalid feature '%s'"), fname); 440 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 441 goto error; 442 } 443 444 if (nvpair_type(elem) != DATA_TYPE_STRING) { 445 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 446 "'%s' must be a string"), propname); 447 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 448 goto error; 449 } 450 451 (void) nvpair_value_string(elem, &strval); 452 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) { 453 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 454 "property '%s' can only be set to " 455 "'enabled'"), propname); 456 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 457 goto error; 458 } 459 460 if (nvlist_add_uint64(retprops, propname, 0) != 0) { 461 (void) no_memory(hdl); 462 goto error; 463 } 464 continue; 465 } 466 467 /* 468 * Make sure this property is valid and applies to this type. 
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to be in the same pool that the
			 * property is set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

			/*
			 * The bootfs property cannot be set on a disk which has
			 * been EFI labeled.
544 */ 545 if (pool_uses_efi(nvroot)) { 546 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 547 "property '%s' not supported on " 548 "EFI labeled devices"), propname); 549 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf); 550 zpool_close(zhp); 551 goto error; 552 } 553 zpool_close(zhp); 554 break; 555 556 case ZPOOL_PROP_ALTROOT: 557 if (!flags.create && !flags.import) { 558 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 559 "property '%s' can only be set during pool " 560 "creation or import"), propname); 561 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 562 goto error; 563 } 564 565 if (strval[0] != '/') { 566 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 567 "bad alternate root '%s'"), strval); 568 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 569 goto error; 570 } 571 break; 572 573 case ZPOOL_PROP_CACHEFILE: 574 if (strval[0] == '\0') 575 break; 576 577 if (strcmp(strval, "none") == 0) 578 break; 579 580 if (strval[0] != '/') { 581 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 582 "property '%s' must be empty, an " 583 "absolute path, or 'none'"), propname); 584 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 585 goto error; 586 } 587 588 slash = strrchr(strval, '/'); 589 590 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 591 strcmp(slash, "/..") == 0) { 592 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 593 "'%s' is not a valid file"), strval); 594 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 595 goto error; 596 } 597 598 *slash = '\0'; 599 600 if (strval[0] != '\0' && 601 (stat64(strval, &statbuf) != 0 || 602 !S_ISDIR(statbuf.st_mode))) { 603 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 604 "'%s' is not a valid directory"), 605 strval); 606 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 607 goto error; 608 } 609 610 *slash = '/'; 611 break; 612 613 case ZPOOL_PROP_COMMENT: 614 for (check = strval; *check != '\0'; check++) { 615 if (!isprint(*check)) { 616 zfs_error_aux(hdl, 617 dgettext(TEXT_DOMAIN, 618 "comment may only have printable " 619 "characters")); 620 (void) zfs_error(hdl, EZFS_BADPROP, 621 errbuf); 622 goto error; 623 } 624 } 625 if (strlen(strval) > ZPROP_MAX_COMMENT) { 626 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 627 "comment must not exceed %d characters"), 628 ZPROP_MAX_COMMENT); 629 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 630 goto error; 631 } 632 break; 633 case ZPOOL_PROP_READONLY: 634 if (!flags.import) { 635 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 636 "property '%s' can only be set at " 637 "import time"), propname); 638 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 639 goto error; 640 } 641 break; 642 } 643 } 644 645 return (retprops); 646 error: 647 nvlist_free(retprops); 648 return (NULL); 649 } 650 651 /* 652 * Set zpool property : propname=propval. 
653 */ 654 int 655 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 656 { 657 zfs_cmd_t zc = { 0 }; 658 int ret = -1; 659 char errbuf[1024]; 660 nvlist_t *nvl = NULL; 661 nvlist_t *realprops; 662 uint64_t version; 663 prop_flags_t flags = { 0 }; 664 665 (void) snprintf(errbuf, sizeof (errbuf), 666 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 667 zhp->zpool_name); 668 669 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 670 return (no_memory(zhp->zpool_hdl)); 671 672 if (nvlist_add_string(nvl, propname, propval) != 0) { 673 nvlist_free(nvl); 674 return (no_memory(zhp->zpool_hdl)); 675 } 676 677 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 678 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 679 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 680 nvlist_free(nvl); 681 return (-1); 682 } 683 684 nvlist_free(nvl); 685 nvl = realprops; 686 687 /* 688 * Execute the corresponding ioctl() to set this property. 689 */ 690 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 691 692 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 693 nvlist_free(nvl); 694 return (-1); 695 } 696 697 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 698 699 zcmd_free_nvlists(&zc); 700 nvlist_free(nvl); 701 702 if (ret) 703 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 704 else 705 (void) zpool_props_refresh(zhp); 706 707 return (ret); 708 } 709 710 int 711 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 712 { 713 libzfs_handle_t *hdl = zhp->zpool_hdl; 714 zprop_list_t *entry; 715 char buf[ZFS_MAXPROPLEN]; 716 nvlist_t *features = NULL; 717 zprop_list_t **last; 718 boolean_t firstexpand = (NULL == *plp); 719 720 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 721 return (-1); 722 723 last = plp; 724 while (*last != NULL) 725 last = &(*last)->pl_next; 726 727 if ((*plp)->pl_all) 728 features = zpool_get_features(zhp); 729 730 if ((*plp)->pl_all && firstexpand) { 731 for (int i = 0; i < SPA_FEATURES; i++) { 732 zprop_list_t *entry = zfs_alloc(hdl, 733 sizeof (zprop_list_t)); 734 entry->pl_prop = ZPROP_INVAL; 735 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s", 736 spa_feature_table[i].fi_uname); 737 entry->pl_width = strlen(entry->pl_user_prop); 738 entry->pl_all = B_TRUE; 739 740 *last = entry; 741 last = &entry->pl_next; 742 } 743 } 744 745 /* add any unsupported features */ 746 for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL); 747 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) { 748 char *propname; 749 boolean_t found; 750 zprop_list_t *entry; 751 752 if (zfeature_is_supported(nvpair_name(nvp))) 753 continue; 754 755 propname = zfs_asprintf(hdl, "unsupported@%s", 756 nvpair_name(nvp)); 757 758 /* 759 * Before adding the property to the list make sure that no 760 * other pool already added the same property. 
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zfs_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

/*
 * Validate the given pool name, optionally putting an extended error message
 * in the handle's error buffer.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
888 */ 889 if (ret == 0 && !isopen && 890 (strncmp(pool, "mirror", 6) == 0 || 891 strncmp(pool, "raidz", 5) == 0 || 892 strncmp(pool, "spare", 5) == 0 || 893 strcmp(pool, "log") == 0)) { 894 if (hdl != NULL) 895 zfs_error_aux(hdl, 896 dgettext(TEXT_DOMAIN, "name is reserved")); 897 return (B_FALSE); 898 } 899 900 901 if (ret != 0) { 902 if (hdl != NULL) { 903 switch (why) { 904 case NAME_ERR_TOOLONG: 905 zfs_error_aux(hdl, 906 dgettext(TEXT_DOMAIN, "name is too long")); 907 break; 908 909 case NAME_ERR_INVALCHAR: 910 zfs_error_aux(hdl, 911 dgettext(TEXT_DOMAIN, "invalid character " 912 "'%c' in pool name"), what); 913 break; 914 915 case NAME_ERR_NOLETTER: 916 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 917 "name must begin with a letter")); 918 break; 919 920 case NAME_ERR_RESERVED: 921 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 922 "name is reserved")); 923 break; 924 925 case NAME_ERR_DISKLIKE: 926 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 927 "pool name is reserved")); 928 break; 929 930 case NAME_ERR_LEADING_SLASH: 931 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 932 "leading slash in name")); 933 break; 934 935 case NAME_ERR_EMPTY_COMPONENT: 936 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 937 "empty component in name")); 938 break; 939 940 case NAME_ERR_TRAILING_SLASH: 941 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 942 "trailing slash in name")); 943 break; 944 945 case NAME_ERR_MULTIPLE_AT: 946 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 947 "multiple '@' delimiters in name")); 948 break; 949 950 } 951 } 952 return (B_FALSE); 953 } 954 955 return (B_TRUE); 956 } 957 958 /* 959 * Open a handle to the given pool, even if the pool is currently in the FAULTED 960 * state. 961 */ 962 zpool_handle_t * 963 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 964 { 965 zpool_handle_t *zhp; 966 boolean_t missing; 967 968 /* 969 * Make sure the pool name is valid. 970 */ 971 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 972 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 973 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 974 pool); 975 return (NULL); 976 } 977 978 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 979 return (NULL); 980 981 zhp->zpool_hdl = hdl; 982 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 983 984 if (zpool_refresh_stats(zhp, &missing) != 0) { 985 zpool_close(zhp); 986 return (NULL); 987 } 988 989 if (missing) { 990 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 991 (void) zfs_error_fmt(hdl, EZFS_NOENT, 992 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 993 zpool_close(zhp); 994 return (NULL); 995 } 996 997 return (zhp); 998 } 999 1000 /* 1001 * Like the above, but silent on error. Used when iterating over pools (because 1002 * the configuration cache may be out of date). 1003 */ 1004 int 1005 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 1006 { 1007 zpool_handle_t *zhp; 1008 boolean_t missing; 1009 1010 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1011 return (-1); 1012 1013 zhp->zpool_hdl = hdl; 1014 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1015 1016 if (zpool_refresh_stats(zhp, &missing) != 0) { 1017 zpool_close(zhp); 1018 return (-1); 1019 } 1020 1021 if (missing) { 1022 zpool_close(zhp); 1023 *ret = NULL; 1024 return (0); 1025 } 1026 1027 *ret = zhp; 1028 return (0); 1029 } 1030 1031 /* 1032 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 1033 * state. 
1034 */ 1035 zpool_handle_t * 1036 zpool_open(libzfs_handle_t *hdl, const char *pool) 1037 { 1038 zpool_handle_t *zhp; 1039 1040 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 1041 return (NULL); 1042 1043 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 1044 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 1045 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 1046 zpool_close(zhp); 1047 return (NULL); 1048 } 1049 1050 return (zhp); 1051 } 1052 1053 /* 1054 * Close the handle. Simply frees the memory associated with the handle. 1055 */ 1056 void 1057 zpool_close(zpool_handle_t *zhp) 1058 { 1059 if (zhp->zpool_config) 1060 nvlist_free(zhp->zpool_config); 1061 if (zhp->zpool_old_config) 1062 nvlist_free(zhp->zpool_old_config); 1063 if (zhp->zpool_props) 1064 nvlist_free(zhp->zpool_props); 1065 free(zhp); 1066 } 1067 1068 /* 1069 * Return the name of the pool. 1070 */ 1071 const char * 1072 zpool_get_name(zpool_handle_t *zhp) 1073 { 1074 return (zhp->zpool_name); 1075 } 1076 1077 1078 /* 1079 * Return the state of the pool (ACTIVE or UNAVAILABLE) 1080 */ 1081 int 1082 zpool_get_state(zpool_handle_t *zhp) 1083 { 1084 return (zhp->zpool_state); 1085 } 1086 1087 /* 1088 * Create the named pool, using the provided vdev list. It is assumed 1089 * that the consumer has already validated the contents of the nvlist, so we 1090 * don't have to worry about error semantics. 1091 */ 1092 int 1093 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 1094 nvlist_t *props, nvlist_t *fsprops) 1095 { 1096 zfs_cmd_t zc = { 0 }; 1097 nvlist_t *zc_fsprops = NULL; 1098 nvlist_t *zc_props = NULL; 1099 char msg[1024]; 1100 int ret = -1; 1101 1102 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1103 "cannot create '%s'"), pool); 1104 1105 if (!zpool_name_valid(hdl, B_FALSE, pool)) 1106 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 1107 1108 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1109 return (-1); 1110 1111 if (props) { 1112 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 1113 1114 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 1115 SPA_VERSION_1, flags, msg)) == NULL) { 1116 goto create_failed; 1117 } 1118 } 1119 1120 if (fsprops) { 1121 uint64_t zoned; 1122 char *zonestr; 1123 1124 zoned = ((nvlist_lookup_string(fsprops, 1125 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 1126 strcmp(zonestr, "on") == 0); 1127 1128 if ((zc_fsprops = zfs_valid_proplist(hdl, 1129 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) { 1130 goto create_failed; 1131 } 1132 if (!zc_props && 1133 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 1134 goto create_failed; 1135 } 1136 if (nvlist_add_nvlist(zc_props, 1137 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 1138 goto create_failed; 1139 } 1140 } 1141 1142 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 1143 goto create_failed; 1144 1145 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 1146 1147 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 1148 1149 zcmd_free_nvlists(&zc); 1150 nvlist_free(zc_props); 1151 nvlist_free(zc_fsprops); 1152 1153 switch (errno) { 1154 case EBUSY: 1155 /* 1156 * This can happen if the user has specified the same 1157 * device multiple times. We can't reliably detect this 1158 * until we try to add it and see we already have a 1159 * label. 
1160 */ 1161 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1162 "one or more vdevs refer to the same device")); 1163 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1164 1165 case EOVERFLOW: 1166 /* 1167 * This occurs when one of the devices is below 1168 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1169 * device was the problem device since there's no 1170 * reliable way to determine device size from userland. 1171 */ 1172 { 1173 char buf[64]; 1174 1175 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1176 1177 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1178 "one or more devices is less than the " 1179 "minimum size (%s)"), buf); 1180 } 1181 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1182 1183 case ENOSPC: 1184 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1185 "one or more devices is out of space")); 1186 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1187 1188 case ENOTBLK: 1189 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1190 "cache device must be a disk or disk slice")); 1191 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1192 1193 default: 1194 return (zpool_standard_error(hdl, errno, msg)); 1195 } 1196 } 1197 1198 create_failed: 1199 zcmd_free_nvlists(&zc); 1200 nvlist_free(zc_props); 1201 nvlist_free(zc_fsprops); 1202 return (ret); 1203 } 1204 1205 /* 1206 * Destroy the given pool. It is up to the caller to ensure that there are no 1207 * datasets left in the pool. 1208 */ 1209 int 1210 zpool_destroy(zpool_handle_t *zhp, const char *log_str) 1211 { 1212 zfs_cmd_t zc = { 0 }; 1213 zfs_handle_t *zfp = NULL; 1214 libzfs_handle_t *hdl = zhp->zpool_hdl; 1215 char msg[1024]; 1216 1217 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1218 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1219 return (-1); 1220 1221 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1222 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1223 1224 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1225 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1226 "cannot destroy '%s'"), zhp->zpool_name); 1227 1228 if (errno == EROFS) { 1229 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1230 "one or more devices is read only")); 1231 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1232 } else { 1233 (void) zpool_standard_error(hdl, errno, msg); 1234 } 1235 1236 if (zfp) 1237 zfs_close(zfp); 1238 return (-1); 1239 } 1240 1241 if (zfp) { 1242 remove_mountpoint(zfp); 1243 zfs_close(zfp); 1244 } 1245 1246 return (0); 1247 } 1248 1249 /* 1250 * Add the given vdevs to the pool. The caller must have already performed the 1251 * necessary verification to ensure that the vdev specification is well-formed. 
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
1327 */ 1328 { 1329 char buf[64]; 1330 1331 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1332 1333 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1334 "device is less than the minimum " 1335 "size (%s)"), buf); 1336 } 1337 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1338 break; 1339 1340 case ENOTSUP: 1341 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1342 "pool must be upgraded to add these vdevs")); 1343 (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1344 break; 1345 1346 case EDOM: 1347 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1348 "root pool can not have multiple vdevs" 1349 " or separate logs")); 1350 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); 1351 break; 1352 1353 case ENOTBLK: 1354 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1355 "cache device must be a disk or disk slice")); 1356 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1357 break; 1358 1359 default: 1360 (void) zpool_standard_error(hdl, errno, msg); 1361 } 1362 1363 ret = -1; 1364 } else { 1365 ret = 0; 1366 } 1367 1368 zcmd_free_nvlists(&zc); 1369 1370 return (ret); 1371 } 1372 1373 /* 1374 * Exports the pool from the system. The caller must ensure that there are no 1375 * mounted datasets in the pool. 1376 */ 1377 static int 1378 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce, 1379 const char *log_str) 1380 { 1381 zfs_cmd_t zc = { 0 }; 1382 char msg[1024]; 1383 1384 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1385 "cannot export '%s'"), zhp->zpool_name); 1386 1387 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1388 zc.zc_cookie = force; 1389 zc.zc_guid = hardforce; 1390 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1391 1392 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1393 switch (errno) { 1394 case EXDEV: 1395 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1396 "use '-f' to override the following errors:\n" 1397 "'%s' has an active shared spare which could be" 1398 " used by other pools once '%s' is exported."), 1399 zhp->zpool_name, zhp->zpool_name); 1400 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1401 msg)); 1402 default: 1403 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1404 msg)); 1405 } 1406 } 1407 1408 return (0); 1409 } 1410 1411 int 1412 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str) 1413 { 1414 return (zpool_export_common(zhp, force, B_FALSE, log_str)); 1415 } 1416 1417 int 1418 zpool_export_force(zpool_handle_t *zhp, const char *log_str) 1419 { 1420 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str)); 1421 } 1422 1423 static void 1424 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, 1425 nvlist_t *config) 1426 { 1427 nvlist_t *nv = NULL; 1428 uint64_t rewindto; 1429 int64_t loss = -1; 1430 struct tm t; 1431 char timestr[128]; 1432 1433 if (!hdl->libzfs_printerr || config == NULL) 1434 return; 1435 1436 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1437 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) { 1438 return; 1439 } 1440 1441 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1442 return; 1443 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1444 1445 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1446 strftime(timestr, 128, 0, &t) != 0) { 1447 if (dryrun) { 1448 (void) printf(dgettext(TEXT_DOMAIN, 1449 "Would be able to return %s " 1450 "to its state as of %s.\n"), 1451 name, timestr); 1452 } else { 1453 (void) printf(dgettext(TEXT_DOMAIN, 1454 "Pool %s returned to its state as of 
%s.\n"), 1455 name, timestr); 1456 } 1457 if (loss > 120) { 1458 (void) printf(dgettext(TEXT_DOMAIN, 1459 "%s approximately %lld "), 1460 dryrun ? "Would discard" : "Discarded", 1461 (loss + 30) / 60); 1462 (void) printf(dgettext(TEXT_DOMAIN, 1463 "minutes of transactions.\n")); 1464 } else if (loss > 0) { 1465 (void) printf(dgettext(TEXT_DOMAIN, 1466 "%s approximately %lld "), 1467 dryrun ? "Would discard" : "Discarded", loss); 1468 (void) printf(dgettext(TEXT_DOMAIN, 1469 "seconds of transactions.\n")); 1470 } 1471 } 1472 } 1473 1474 void 1475 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1476 nvlist_t *config) 1477 { 1478 nvlist_t *nv = NULL; 1479 int64_t loss = -1; 1480 uint64_t edata = UINT64_MAX; 1481 uint64_t rewindto; 1482 struct tm t; 1483 char timestr[128]; 1484 1485 if (!hdl->libzfs_printerr) 1486 return; 1487 1488 if (reason >= 0) 1489 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1490 else 1491 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1492 1493 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1494 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1495 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 || 1496 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1497 goto no_info; 1498 1499 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1500 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1501 &edata); 1502 1503 (void) printf(dgettext(TEXT_DOMAIN, 1504 "Recovery is possible, but will result in some data loss.\n")); 1505 1506 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1507 strftime(timestr, 128, 0, &t) != 0) { 1508 (void) printf(dgettext(TEXT_DOMAIN, 1509 "\tReturning the pool to its state as of %s\n" 1510 "\tshould correct the problem. "), 1511 timestr); 1512 } else { 1513 (void) printf(dgettext(TEXT_DOMAIN, 1514 "\tReverting the pool to an earlier state " 1515 "should correct the problem.\n\t")); 1516 } 1517 1518 if (loss > 120) { 1519 (void) printf(dgettext(TEXT_DOMAIN, 1520 "Approximately %lld minutes of data\n" 1521 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1522 } else if (loss > 0) { 1523 (void) printf(dgettext(TEXT_DOMAIN, 1524 "Approximately %lld seconds of data\n" 1525 "\tmust be discarded, irreversibly. "), loss); 1526 } 1527 if (edata != 0 && edata != UINT64_MAX) { 1528 if (edata == 1) { 1529 (void) printf(dgettext(TEXT_DOMAIN, 1530 "After rewind, at least\n" 1531 "\tone persistent user-data error will remain. ")); 1532 } else { 1533 (void) printf(dgettext(TEXT_DOMAIN, 1534 "After rewind, several\n" 1535 "\tpersistent user-data errors will remain. ")); 1536 } 1537 } 1538 (void) printf(dgettext(TEXT_DOMAIN, 1539 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1540 reason >= 0 ? "clear" : "import", name); 1541 1542 (void) printf(dgettext(TEXT_DOMAIN, 1543 "A scrub of the pool\n" 1544 "\tis strongly recommended after recovery.\n")); 1545 return; 1546 1547 no_info: 1548 (void) printf(dgettext(TEXT_DOMAIN, 1549 "Destroy and re-create the pool from\n\ta backup source.\n")); 1550 } 1551 1552 /* 1553 * zpool_import() is a contracted interface. Should be kept the same 1554 * if possible. 1555 * 1556 * Applications should use zpool_import_props() to import a pool with 1557 * new properties value to be set. 
1558 */ 1559 int 1560 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1561 char *altroot) 1562 { 1563 nvlist_t *props = NULL; 1564 int ret; 1565 1566 if (altroot != NULL) { 1567 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1568 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1569 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1570 newname)); 1571 } 1572 1573 if (nvlist_add_string(props, 1574 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1575 nvlist_add_string(props, 1576 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1577 nvlist_free(props); 1578 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1579 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1580 newname)); 1581 } 1582 } 1583 1584 ret = zpool_import_props(hdl, config, newname, props, 1585 ZFS_IMPORT_NORMAL); 1586 if (props) 1587 nvlist_free(props); 1588 return (ret); 1589 } 1590 1591 static void 1592 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1593 int indent) 1594 { 1595 nvlist_t **child; 1596 uint_t c, children; 1597 char *vname; 1598 uint64_t is_log = 0; 1599 1600 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1601 &is_log); 1602 1603 if (name != NULL) 1604 (void) printf("\t%*s%s%s\n", indent, "", name, 1605 is_log ? " [log]" : ""); 1606 1607 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1608 &child, &children) != 0) 1609 return; 1610 1611 for (c = 0; c < children; c++) { 1612 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1613 print_vdev_tree(hdl, vname, child[c], indent + 2); 1614 free(vname); 1615 } 1616 } 1617 1618 void 1619 zpool_print_unsup_feat(nvlist_t *config) 1620 { 1621 nvlist_t *nvinfo, *unsup_feat; 1622 1623 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 1624 0); 1625 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT, 1626 &unsup_feat) == 0); 1627 1628 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL; 1629 nvp = nvlist_next_nvpair(unsup_feat, nvp)) { 1630 char *desc; 1631 1632 verify(nvpair_type(nvp) == DATA_TYPE_STRING); 1633 verify(nvpair_value_string(nvp, &desc) == 0); 1634 1635 if (strlen(desc) > 0) 1636 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc); 1637 else 1638 (void) printf("\t%s\n", nvpair_name(nvp)); 1639 } 1640 } 1641 1642 /* 1643 * Import the given pool using the known configuration and a list of 1644 * properties to be set. The configuration should have come from 1645 * zpool_find_import(). The 'newname' parameters control whether the pool 1646 * is imported with a different name. 
1647 */ 1648 int 1649 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1650 nvlist_t *props, int flags) 1651 { 1652 zfs_cmd_t zc = { 0 }; 1653 zpool_rewind_policy_t policy; 1654 nvlist_t *nv = NULL; 1655 nvlist_t *nvinfo = NULL; 1656 nvlist_t *missing = NULL; 1657 char *thename; 1658 char *origname; 1659 int ret; 1660 int error = 0; 1661 char errbuf[1024]; 1662 1663 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1664 &origname) == 0); 1665 1666 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1667 "cannot import pool '%s'"), origname); 1668 1669 if (newname != NULL) { 1670 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1671 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1672 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1673 newname)); 1674 thename = (char *)newname; 1675 } else { 1676 thename = origname; 1677 } 1678 1679 if (props) { 1680 uint64_t version; 1681 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1682 1683 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1684 &version) == 0); 1685 1686 if ((props = zpool_valid_proplist(hdl, origname, 1687 props, version, flags, errbuf)) == NULL) { 1688 return (-1); 1689 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1690 nvlist_free(props); 1691 return (-1); 1692 } 1693 } 1694 1695 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1696 1697 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1698 &zc.zc_guid) == 0); 1699 1700 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1701 nvlist_free(props); 1702 return (-1); 1703 } 1704 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1705 nvlist_free(props); 1706 return (-1); 1707 } 1708 1709 zc.zc_cookie = flags; 1710 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1711 errno == ENOMEM) { 1712 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1713 zcmd_free_nvlists(&zc); 1714 return (-1); 1715 } 1716 } 1717 if (ret != 0) 1718 error = errno; 1719 1720 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1721 zpool_get_rewind_policy(config, &policy); 1722 1723 if (error) { 1724 char desc[1024]; 1725 1726 /* 1727 * Dry-run failed, but we print out what success 1728 * looks like if we found a best txg 1729 */ 1730 if (policy.zrp_request & ZPOOL_TRY_REWIND) { 1731 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1732 B_TRUE, nv); 1733 nvlist_free(nv); 1734 return (-1); 1735 } 1736 1737 if (newname == NULL) 1738 (void) snprintf(desc, sizeof (desc), 1739 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1740 thename); 1741 else 1742 (void) snprintf(desc, sizeof (desc), 1743 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1744 origname, thename); 1745 1746 switch (error) { 1747 case ENOTSUP: 1748 if (nv != NULL && nvlist_lookup_nvlist(nv, 1749 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1750 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) { 1751 (void) printf(dgettext(TEXT_DOMAIN, "This " 1752 "pool uses the following feature(s) not " 1753 "supported by this system:\n")); 1754 zpool_print_unsup_feat(nv); 1755 if (nvlist_exists(nvinfo, 1756 ZPOOL_CONFIG_CAN_RDONLY)) { 1757 (void) printf(dgettext(TEXT_DOMAIN, 1758 "All unsupported features are only " 1759 "required for writing to the pool." 1760 "\nThe pool can be imported using " 1761 "'-o readonly=on'.\n")); 1762 } 1763 } 1764 /* 1765 * Unsupported version. 
1766 */ 1767 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1768 break; 1769 1770 case EINVAL: 1771 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1772 break; 1773 1774 case EROFS: 1775 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1776 "one or more devices is read only")); 1777 (void) zfs_error(hdl, EZFS_BADDEV, desc); 1778 break; 1779 1780 case ENXIO: 1781 if (nv && nvlist_lookup_nvlist(nv, 1782 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1783 nvlist_lookup_nvlist(nvinfo, 1784 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1785 (void) printf(dgettext(TEXT_DOMAIN, 1786 "The devices below are missing, use " 1787 "'-m' to import the pool anyway:\n")); 1788 print_vdev_tree(hdl, NULL, missing, 2); 1789 (void) printf("\n"); 1790 } 1791 (void) zpool_standard_error(hdl, error, desc); 1792 break; 1793 1794 case EEXIST: 1795 (void) zpool_standard_error(hdl, error, desc); 1796 break; 1797 1798 default: 1799 (void) zpool_standard_error(hdl, error, desc); 1800 zpool_explain_recover(hdl, 1801 newname ? origname : thename, -error, nv); 1802 break; 1803 } 1804 1805 nvlist_free(nv); 1806 ret = -1; 1807 } else { 1808 zpool_handle_t *zhp; 1809 1810 /* 1811 * This should never fail, but play it safe anyway. 1812 */ 1813 if (zpool_open_silent(hdl, thename, &zhp) != 0) 1814 ret = -1; 1815 else if (zhp != NULL) 1816 zpool_close(zhp); 1817 if (policy.zrp_request & 1818 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 1819 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1820 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv); 1821 } 1822 nvlist_free(nv); 1823 return (0); 1824 } 1825 1826 zcmd_free_nvlists(&zc); 1827 nvlist_free(props); 1828 1829 return (ret); 1830 } 1831 1832 /* 1833 * Scan the pool. 1834 */ 1835 int 1836 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func) 1837 { 1838 zfs_cmd_t zc = { 0 }; 1839 char msg[1024]; 1840 libzfs_handle_t *hdl = zhp->zpool_hdl; 1841 1842 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1843 zc.zc_cookie = func; 1844 1845 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 || 1846 (errno == ENOENT && func != POOL_SCAN_NONE)) 1847 return (0); 1848 1849 if (func == POOL_SCAN_SCRUB) { 1850 (void) snprintf(msg, sizeof (msg), 1851 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name); 1852 } else if (func == POOL_SCAN_NONE) { 1853 (void) snprintf(msg, sizeof (msg), 1854 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 1855 zc.zc_name); 1856 } else { 1857 assert(!"unexpected result"); 1858 } 1859 1860 if (errno == EBUSY) { 1861 nvlist_t *nvroot; 1862 pool_scan_stat_t *ps = NULL; 1863 uint_t psc; 1864 1865 verify(nvlist_lookup_nvlist(zhp->zpool_config, 1866 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1867 (void) nvlist_lookup_uint64_array(nvroot, 1868 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 1869 if (ps && ps->pss_func == POOL_SCAN_SCRUB) 1870 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 1871 else 1872 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 1873 } else if (errno == ENOENT) { 1874 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 1875 } else { 1876 return (zpool_standard_error(hdl, errno, msg)); 1877 } 1878 } 1879 1880 /* 1881 * This provides a very minimal check whether a given string is likely a 1882 * c#t#d# style string. Users of this are expected to do their own 1883 * verification of the s# part. 1884 */ 1885 #define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 1886 1887 /* 1888 * More elaborate version for ones which may start with "/dev/dsk/" 1889 * and the like. 
 */
static int
ctd_check_path(char *str) {
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)
				;
		}
		str = tmp + 1;
	}
	return (CTD_CHECK(str));
}

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "s0" or "s0/old". The "s0" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				int slen = strlen(srchval);
				int vlen = strlen(val);

				if (slen != vlen - 2)
					break;

				/*
				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.
				 */
				ASSERT(vlen >= 6);

				/*
				 * strings identical except trailing "s0"
				 */
				if (strcmp(&val[vlen - 2], "s0") == 0 &&
				    strncmp(srchval, val, slen) == 0)
					return (nv);

				/*
				 * strings identical except trailing "s0/old"
				 */
				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)
					return (nv);

				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
2012 */ 2013 if ((type = strdup(srchval)) == NULL) 2014 return (NULL); 2015 2016 if ((p = strrchr(type, '-')) == NULL) { 2017 free(type); 2018 break; 2019 } 2020 idx = p + 1; 2021 *p = '\0'; 2022 2023 /* 2024 * If the types don't match then keep looking. 2025 */ 2026 if (strncmp(val, type, strlen(val)) != 0) { 2027 free(type); 2028 break; 2029 } 2030 2031 verify(strncmp(type, VDEV_TYPE_RAIDZ, 2032 strlen(VDEV_TYPE_RAIDZ)) == 0 || 2033 strncmp(type, VDEV_TYPE_MIRROR, 2034 strlen(VDEV_TYPE_MIRROR)) == 0); 2035 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 2036 &id) == 0); 2037 2038 errno = 0; 2039 vdev_id = strtoull(idx, &end, 10); 2040 2041 free(type); 2042 if (errno != 0) 2043 return (NULL); 2044 2045 /* 2046 * Now verify that we have the correct vdev id. 2047 */ 2048 if (vdev_id == id) 2049 return (nv); 2050 } 2051 2052 /* 2053 * Common case 2054 */ 2055 if (strcmp(srchval, val) == 0) 2056 return (nv); 2057 break; 2058 } 2059 2060 default: 2061 break; 2062 } 2063 2064 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2065 &child, &children) != 0) 2066 return (NULL); 2067 2068 for (c = 0; c < children; c++) { 2069 if ((ret = vdev_to_nvlist_iter(child[c], search, 2070 avail_spare, l2cache, NULL)) != NULL) { 2071 /* 2072 * The 'is_log' value is only set for the toplevel 2073 * vdev, not the leaf vdevs. So we always lookup the 2074 * log device from the root of the vdev tree (where 2075 * 'log' is non-NULL). 2076 */ 2077 if (log != NULL && 2078 nvlist_lookup_uint64(child[c], 2079 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 2080 is_log) { 2081 *log = B_TRUE; 2082 } 2083 return (ret); 2084 } 2085 } 2086 2087 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2088 &child, &children) == 0) { 2089 for (c = 0; c < children; c++) { 2090 if ((ret = vdev_to_nvlist_iter(child[c], search, 2091 avail_spare, l2cache, NULL)) != NULL) { 2092 *avail_spare = B_TRUE; 2093 return (ret); 2094 } 2095 } 2096 } 2097 2098 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2099 &child, &children) == 0) { 2100 for (c = 0; c < children; c++) { 2101 if ((ret = vdev_to_nvlist_iter(child[c], search, 2102 avail_spare, l2cache, NULL)) != NULL) { 2103 *l2cache = B_TRUE; 2104 return (ret); 2105 } 2106 } 2107 } 2108 2109 return (NULL); 2110 } 2111 2112 /* 2113 * Given a physical path (minus the "/devices" prefix), find the 2114 * associated vdev. 2115 */ 2116 nvlist_t * 2117 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 2118 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 2119 { 2120 nvlist_t *search, *nvroot, *ret; 2121 2122 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2123 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 2124 2125 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2126 &nvroot) == 0); 2127 2128 *avail_spare = B_FALSE; 2129 *l2cache = B_FALSE; 2130 if (log != NULL) 2131 *log = B_FALSE; 2132 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2133 nvlist_free(search); 2134 2135 return (ret); 2136 } 2137 2138 /* 2139 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 
2140 */ 2141 boolean_t 2142 zpool_vdev_is_interior(const char *name) 2143 { 2144 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2145 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2146 return (B_TRUE); 2147 return (B_FALSE); 2148 } 2149 2150 nvlist_t * 2151 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2152 boolean_t *l2cache, boolean_t *log) 2153 { 2154 char buf[MAXPATHLEN]; 2155 char *end; 2156 nvlist_t *nvroot, *search, *ret; 2157 uint64_t guid; 2158 2159 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2160 2161 guid = strtoull(path, &end, 10); 2162 if (guid != 0 && *end == '\0') { 2163 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2164 } else if (zpool_vdev_is_interior(path)) { 2165 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2166 } else if (path[0] != '/') { 2167 (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path); 2168 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2169 } else { 2170 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2171 } 2172 2173 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2174 &nvroot) == 0); 2175 2176 *avail_spare = B_FALSE; 2177 *l2cache = B_FALSE; 2178 if (log != NULL) 2179 *log = B_FALSE; 2180 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2181 nvlist_free(search); 2182 2183 return (ret); 2184 } 2185 2186 static int 2187 vdev_online(nvlist_t *nv) 2188 { 2189 uint64_t ival; 2190 2191 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2192 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2193 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2194 return (0); 2195 2196 return (1); 2197 } 2198 2199 /* 2200 * Helper function for zpool_get_physpaths(). 2201 */ 2202 static int 2203 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2204 size_t *bytes_written) 2205 { 2206 size_t bytes_left, pos, rsz; 2207 char *tmppath; 2208 const char *format; 2209 2210 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2211 &tmppath) != 0) 2212 return (EZFS_NODEVICE); 2213 2214 pos = *bytes_written; 2215 bytes_left = physpath_size - pos; 2216 format = (pos == 0) ? "%s" : " %s"; 2217 2218 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2219 *bytes_written += rsz; 2220 2221 if (rsz >= bytes_left) { 2222 /* if physpath was not copied properly, clear it */ 2223 if (bytes_left != 0) { 2224 physpath[pos] = 0; 2225 } 2226 return (EZFS_NOSPC); 2227 } 2228 return (0); 2229 } 2230 2231 static int 2232 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2233 size_t *rsz, boolean_t is_spare) 2234 { 2235 char *type; 2236 int ret; 2237 2238 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2239 return (EZFS_INVALCONFIG); 2240 2241 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2242 /* 2243 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2244 * For a spare vdev, we only want to boot from the active 2245 * spare device. 
2246 */ 2247 if (is_spare) { 2248 uint64_t spare = 0; 2249 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2250 &spare); 2251 if (!spare) 2252 return (EZFS_INVALCONFIG); 2253 } 2254 2255 if (vdev_online(nv)) { 2256 if ((ret = vdev_get_one_physpath(nv, physpath, 2257 phypath_size, rsz)) != 0) 2258 return (ret); 2259 } 2260 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2261 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2262 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2263 nvlist_t **child; 2264 uint_t count; 2265 int i, ret; 2266 2267 if (nvlist_lookup_nvlist_array(nv, 2268 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2269 return (EZFS_INVALCONFIG); 2270 2271 for (i = 0; i < count; i++) { 2272 ret = vdev_get_physpaths(child[i], physpath, 2273 phypath_size, rsz, is_spare); 2274 if (ret == EZFS_NOSPC) 2275 return (ret); 2276 } 2277 } 2278 2279 return (EZFS_POOL_INVALARG); 2280 } 2281 2282 /* 2283 * Get phys_path for a root pool config. 2284 * Return 0 on success; non-zero on failure. 2285 */ 2286 static int 2287 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2288 { 2289 size_t rsz; 2290 nvlist_t *vdev_root; 2291 nvlist_t **child; 2292 uint_t count; 2293 char *type; 2294 2295 rsz = 0; 2296 2297 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2298 &vdev_root) != 0) 2299 return (EZFS_INVALCONFIG); 2300 2301 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2302 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2303 &child, &count) != 0) 2304 return (EZFS_INVALCONFIG); 2305 2306 /* 2307 * root pool cannot have EFI labeled disks and can only have 2308 * a single top-level vdev. 2309 */ 2310 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 || 2311 pool_uses_efi(vdev_root)) 2312 return (EZFS_POOL_INVALARG); 2313 2314 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2315 B_FALSE); 2316 2317 /* No online devices */ 2318 if (rsz == 0) 2319 return (EZFS_NODEVICE); 2320 2321 return (0); 2322 } 2323 2324 /* 2325 * Get phys_path for a root pool. 2326 * Return 0 on success; non-zero on failure. 2327 */ 2328 int 2329 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2330 { 2331 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2332 phypath_size)); 2333 } 2334 2335 /* 2336 * If the device has been dynamically expanded then we need to relabel 2337 * the disk to use the new unallocated space. 2338 */ 2339 static int 2340 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2341 { 2342 char path[MAXPATHLEN]; 2343 char errbuf[1024]; 2344 int fd, error; 2345 int (*_efi_use_whole_disk)(int); 2346 2347 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2348 "efi_use_whole_disk")) == NULL) 2349 return (-1); 2350 2351 (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name); 2352 2353 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2354 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2355 "relabel '%s': unable to open device"), name); 2356 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2357 } 2358 2359 /* 2360 * It's possible that we might encounter an error if the device 2361 * does not have any unallocated space left. If so, we simply 2362 * ignore that error and continue on.
2363 */ 2364 error = _efi_use_whole_disk(fd); 2365 (void) close(fd); 2366 if (error && error != VT_ENOSPC) { 2367 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2368 "relabel '%s': unable to read disk capacity"), name); 2369 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2370 } 2371 return (0); 2372 } 2373 2374 /* 2375 * Bring the specified vdev online. The 'flags' parameter is a set of the 2376 * ZFS_ONLINE_* flags. 2377 */ 2378 int 2379 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2380 vdev_state_t *newstate) 2381 { 2382 zfs_cmd_t zc = { 0 }; 2383 char msg[1024]; 2384 nvlist_t *tgt; 2385 boolean_t avail_spare, l2cache, islog; 2386 libzfs_handle_t *hdl = zhp->zpool_hdl; 2387 2388 if (flags & ZFS_ONLINE_EXPAND) { 2389 (void) snprintf(msg, sizeof (msg), 2390 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2391 } else { 2392 (void) snprintf(msg, sizeof (msg), 2393 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2394 } 2395 2396 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2397 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2398 &islog)) == NULL) 2399 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2400 2401 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2402 2403 if (avail_spare) 2404 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2405 2406 if (flags & ZFS_ONLINE_EXPAND || 2407 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) { 2408 char *pathname = NULL; 2409 uint64_t wholedisk = 0; 2410 2411 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2412 &wholedisk); 2413 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, 2414 &pathname) == 0); 2415 2416 /* 2417 * XXX - L2ARC 1.0 devices can't support expansion. 2418 */ 2419 if (l2cache) { 2420 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2421 "cannot expand cache devices")); 2422 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2423 } 2424 2425 if (wholedisk) { 2426 pathname += strlen(DISK_ROOT) + 1; 2427 (void) zpool_relabel_disk(hdl, pathname); 2428 } 2429 } 2430 2431 zc.zc_cookie = VDEV_STATE_ONLINE; 2432 zc.zc_obj = flags; 2433 2434 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2435 if (errno == EINVAL) { 2436 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2437 "from this pool into a new one. Use '%s' " 2438 "instead"), "zpool detach"); 2439 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2440 } 2441 return (zpool_standard_error(hdl, errno, msg)); 2442 } 2443 2444 *newstate = zc.zc_cookie; 2445 return (0); 2446 } 2447 2448 /* 2449 * Take the specified vdev offline 2450 */ 2451 int 2452 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2453 { 2454 zfs_cmd_t zc = { 0 }; 2455 char msg[1024]; 2456 nvlist_t *tgt; 2457 boolean_t avail_spare, l2cache; 2458 libzfs_handle_t *hdl = zhp->zpool_hdl; 2459 2460 (void) snprintf(msg, sizeof (msg), 2461 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2462 2463 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2464 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2465 NULL)) == NULL) 2466 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2467 2468 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2469 2470 if (avail_spare) 2471 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2472 2473 zc.zc_cookie = VDEV_STATE_OFFLINE; 2474 zc.zc_obj = istmp ? 
ZFS_OFFLINE_TEMPORARY : 0; 2475 2476 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2477 return (0); 2478 2479 switch (errno) { 2480 case EBUSY: 2481 2482 /* 2483 * There are no other replicas of this device. 2484 */ 2485 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2486 2487 case EEXIST: 2488 /* 2489 * The log device has unplayed logs 2490 */ 2491 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2492 2493 default: 2494 return (zpool_standard_error(hdl, errno, msg)); 2495 } 2496 } 2497 2498 /* 2499 * Mark the given vdev faulted. 2500 */ 2501 int 2502 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2503 { 2504 zfs_cmd_t zc = { 0 }; 2505 char msg[1024]; 2506 libzfs_handle_t *hdl = zhp->zpool_hdl; 2507 2508 (void) snprintf(msg, sizeof (msg), 2509 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2510 2511 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2512 zc.zc_guid = guid; 2513 zc.zc_cookie = VDEV_STATE_FAULTED; 2514 zc.zc_obj = aux; 2515 2516 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2517 return (0); 2518 2519 switch (errno) { 2520 case EBUSY: 2521 2522 /* 2523 * There are no other replicas of this device. 2524 */ 2525 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2526 2527 default: 2528 return (zpool_standard_error(hdl, errno, msg)); 2529 } 2530 2531 } 2532 2533 /* 2534 * Mark the given vdev degraded. 2535 */ 2536 int 2537 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2538 { 2539 zfs_cmd_t zc = { 0 }; 2540 char msg[1024]; 2541 libzfs_handle_t *hdl = zhp->zpool_hdl; 2542 2543 (void) snprintf(msg, sizeof (msg), 2544 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2545 2546 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2547 zc.zc_guid = guid; 2548 zc.zc_cookie = VDEV_STATE_DEGRADED; 2549 zc.zc_obj = aux; 2550 2551 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2552 return (0); 2553 2554 return (zpool_standard_error(hdl, errno, msg)); 2555 } 2556 2557 /* 2558 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2559 * a hot spare. 2560 */ 2561 static boolean_t 2562 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2563 { 2564 nvlist_t **child; 2565 uint_t c, children; 2566 char *type; 2567 2568 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2569 &children) == 0) { 2570 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2571 &type) == 0); 2572 2573 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2574 children == 2 && child[which] == tgt) 2575 return (B_TRUE); 2576 2577 for (c = 0; c < children; c++) 2578 if (is_replacing_spare(child[c], tgt, which)) 2579 return (B_TRUE); 2580 } 2581 2582 return (B_FALSE); 2583 } 2584 2585 /* 2586 * Attach new_disk (fully described by nvroot) to old_disk. 2587 * If 'replacing' is specified, the new disk will replace the old one. 
2588 */ 2589 int 2590 zpool_vdev_attach(zpool_handle_t *zhp, 2591 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2592 { 2593 zfs_cmd_t zc = { 0 }; 2594 char msg[1024]; 2595 int ret; 2596 nvlist_t *tgt; 2597 boolean_t avail_spare, l2cache, islog; 2598 uint64_t val; 2599 char *newname; 2600 nvlist_t **child; 2601 uint_t children; 2602 nvlist_t *config_root; 2603 libzfs_handle_t *hdl = zhp->zpool_hdl; 2604 boolean_t rootpool = zpool_is_bootable(zhp); 2605 2606 if (replacing) 2607 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2608 "cannot replace %s with %s"), old_disk, new_disk); 2609 else 2610 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2611 "cannot attach %s to %s"), new_disk, old_disk); 2612 2613 /* 2614 * If this is a root pool, make sure that we're not attaching an 2615 * EFI labeled device. 2616 */ 2617 if (rootpool && pool_uses_efi(nvroot)) { 2618 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2619 "EFI labeled devices are not supported on root pools.")); 2620 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 2621 } 2622 2623 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2624 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2625 &islog)) == 0) 2626 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2627 2628 if (avail_spare) 2629 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2630 2631 if (l2cache) 2632 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2633 2634 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2635 zc.zc_cookie = replacing; 2636 2637 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2638 &child, &children) != 0 || children != 1) { 2639 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2640 "new device must be a single disk")); 2641 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2642 } 2643 2644 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2645 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2646 2647 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2648 return (-1); 2649 2650 /* 2651 * If the target is a hot spare that has been swapped in, we can only 2652 * replace it with another hot spare. 2653 */ 2654 if (replacing && 2655 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2656 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2657 NULL) == NULL || !avail_spare) && 2658 is_replacing_spare(config_root, tgt, 1)) { 2659 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2660 "can only be replaced by another hot spare")); 2661 free(newname); 2662 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2663 } 2664 2665 free(newname); 2666 2667 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2668 return (-1); 2669 2670 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2671 2672 zcmd_free_nvlists(&zc); 2673 2674 if (ret == 0) { 2675 if (rootpool) { 2676 /* 2677 * XXX need a better way to prevent user from 2678 * booting up a half-baked vdev. 2679 */ 2680 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2681 "sure to wait until resilver is done " 2682 "before rebooting.\n")); 2683 } 2684 return (0); 2685 } 2686 2687 switch (errno) { 2688 case ENOTSUP: 2689 /* 2690 * Can't attach to or replace this type of vdev. 
2691 */ 2692 if (replacing) { 2693 uint64_t version = zpool_get_prop_int(zhp, 2694 ZPOOL_PROP_VERSION, NULL); 2695 2696 if (islog) 2697 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2698 "cannot replace a log with a spare")); 2699 else if (version >= SPA_VERSION_MULTI_REPLACE) 2700 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2701 "already in replacing/spare config; wait " 2702 "for completion or use 'zpool detach'")); 2703 else 2704 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2705 "cannot replace a replacing device")); 2706 } else { 2707 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2708 "can only attach to mirrors and top-level " 2709 "disks")); 2710 } 2711 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2712 break; 2713 2714 case EINVAL: 2715 /* 2716 * The new device must be a single disk. 2717 */ 2718 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2719 "new device must be a single disk")); 2720 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2721 break; 2722 2723 case EBUSY: 2724 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2725 new_disk); 2726 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2727 break; 2728 2729 case EOVERFLOW: 2730 /* 2731 * The new device is too small. 2732 */ 2733 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2734 "device is too small")); 2735 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2736 break; 2737 2738 case EDOM: 2739 /* 2740 * The new device has a different alignment requirement. 2741 */ 2742 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2743 "devices have different sector alignment")); 2744 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2745 break; 2746 2747 case ENAMETOOLONG: 2748 /* 2749 * The resulting top-level vdev spec won't fit in the label. 2750 */ 2751 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2752 break; 2753 2754 default: 2755 (void) zpool_standard_error(hdl, errno, msg); 2756 } 2757 2758 return (-1); 2759 } 2760 2761 /* 2762 * Detach the specified device. 2763 */ 2764 int 2765 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2766 { 2767 zfs_cmd_t zc = { 0 }; 2768 char msg[1024]; 2769 nvlist_t *tgt; 2770 boolean_t avail_spare, l2cache; 2771 libzfs_handle_t *hdl = zhp->zpool_hdl; 2772 2773 (void) snprintf(msg, sizeof (msg), 2774 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2775 2776 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2777 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2778 NULL)) == 0) 2779 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2780 2781 if (avail_spare) 2782 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2783 2784 if (l2cache) 2785 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2786 2787 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2788 2789 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2790 return (0); 2791 2792 switch (errno) { 2793 2794 case ENOTSUP: 2795 /* 2796 * Can't detach from this type of vdev. 2797 */ 2798 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2799 "applicable to mirror and replacing vdevs")); 2800 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2801 break; 2802 2803 case EBUSY: 2804 /* 2805 * There are no other replicas of this device. 2806 */ 2807 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2808 break; 2809 2810 default: 2811 (void) zpool_standard_error(hdl, errno, msg); 2812 } 2813 2814 return (-1); 2815 } 2816 2817 /* 2818 * Find a mirror vdev in the source nvlist. 2819 * 2820 * The mchild array contains a list of disks in one of the top-level mirrors 2821 * of the source pool. The schild array contains a list of disks that the 2822 * user specified on the command line. 
We loop over the mchild array to 2823 * see if any entry in the schild array matches. 2824 * 2825 * If a disk in the mchild array is found in the schild array, we return 2826 * the index of that entry. Otherwise we return -1. 2827 */ 2828 static int 2829 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2830 nvlist_t **schild, uint_t schildren) 2831 { 2832 uint_t mc; 2833 2834 for (mc = 0; mc < mchildren; mc++) { 2835 uint_t sc; 2836 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2837 mchild[mc], B_FALSE); 2838 2839 for (sc = 0; sc < schildren; sc++) { 2840 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2841 schild[sc], B_FALSE); 2842 boolean_t result = (strcmp(mpath, spath) == 0); 2843 2844 free(spath); 2845 if (result) { 2846 free(mpath); 2847 return (mc); 2848 } 2849 } 2850 2851 free(mpath); 2852 } 2853 2854 return (-1); 2855 } 2856 2857 /* 2858 * Split a mirror pool. If newroot points to null, then a new nvlist 2859 * is generated and it is the responsibility of the caller to free it. 2860 */ 2861 int 2862 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2863 nvlist_t *props, splitflags_t flags) 2864 { 2865 zfs_cmd_t zc = { 0 }; 2866 char msg[1024]; 2867 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2868 nvlist_t **varray = NULL, *zc_props = NULL; 2869 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2870 libzfs_handle_t *hdl = zhp->zpool_hdl; 2871 uint64_t vers; 2872 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2873 int retval = 0; 2874 2875 (void) snprintf(msg, sizeof (msg), 2876 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2877 2878 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2879 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2880 2881 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2882 (void) fprintf(stderr, gettext("Internal error: unable to " 2883 "retrieve pool configuration\n")); 2884 return (-1); 2885 } 2886 2887 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2888 == 0); 2889 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2890 2891 if (props) { 2892 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2893 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2894 props, vers, flags, msg)) == NULL) 2895 return (-1); 2896 } 2897 2898 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2899 &children) != 0) { 2900 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2901 "Source pool is missing vdev tree")); 2902 if (zc_props) 2903 nvlist_free(zc_props); 2904 return (-1); 2905 } 2906 2907 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2908 vcount = 0; 2909 2910 if (*newroot == NULL || 2911 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2912 &newchild, &newchildren) != 0) 2913 newchildren = 0; 2914 2915 for (c = 0; c < children; c++) { 2916 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2917 char *type; 2918 nvlist_t **mchild, *vdev; 2919 uint_t mchildren; 2920 int entry; 2921 2922 /* 2923 * Unlike cache & spares, slogs are stored in the 2924 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2925 */ 2926 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2927 &is_log); 2928 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2929 &is_hole); 2930 if (is_log || is_hole) { 2931 /* 2932 * Create a hole vdev and put it in the config. 
2933 */ 2934 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2935 goto out; 2936 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2937 VDEV_TYPE_HOLE) != 0) 2938 goto out; 2939 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2940 1) != 0) 2941 goto out; 2942 if (lastlog == 0) 2943 lastlog = vcount; 2944 varray[vcount++] = vdev; 2945 continue; 2946 } 2947 lastlog = 0; 2948 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2949 == 0); 2950 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2951 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2952 "Source pool must be composed only of mirrors\n")); 2953 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2954 goto out; 2955 } 2956 2957 verify(nvlist_lookup_nvlist_array(child[c], 2958 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2959 2960 /* find or add an entry for this top-level vdev */ 2961 if (newchildren > 0 && 2962 (entry = find_vdev_entry(zhp, mchild, mchildren, 2963 newchild, newchildren)) >= 0) { 2964 /* We found a disk that the user specified. */ 2965 vdev = mchild[entry]; 2966 ++found; 2967 } else { 2968 /* User didn't specify a disk for this vdev. */ 2969 vdev = mchild[mchildren - 1]; 2970 } 2971 2972 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 2973 goto out; 2974 } 2975 2976 /* did we find every disk the user specified? */ 2977 if (found != newchildren) { 2978 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 2979 "include at most one disk from each mirror")); 2980 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2981 goto out; 2982 } 2983 2984 /* Prepare the nvlist for populating. */ 2985 if (*newroot == NULL) { 2986 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 2987 goto out; 2988 freelist = B_TRUE; 2989 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 2990 VDEV_TYPE_ROOT) != 0) 2991 goto out; 2992 } else { 2993 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 2994 } 2995 2996 /* Add all the children we found */ 2997 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 2998 lastlog == 0 ? vcount : lastlog) != 0) 2999 goto out; 3000 3001 /* 3002 * If we're just doing a dry run, exit now with success. 3003 */ 3004 if (flags.dryrun) { 3005 memory_err = B_FALSE; 3006 freelist = B_FALSE; 3007 goto out; 3008 } 3009 3010 /* now build up the config list & call the ioctl */ 3011 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3012 goto out; 3013 3014 if (nvlist_add_nvlist(newconfig, 3015 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3016 nvlist_add_string(newconfig, 3017 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3018 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3019 goto out; 3020 3021 /* 3022 * The new pool is automatically part of the namespace unless we 3023 * explicitly export it. 
3024 */ 3025 if (!flags.import) 3026 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3027 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3028 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3029 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3030 goto out; 3031 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3032 goto out; 3033 3034 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3035 retval = zpool_standard_error(hdl, errno, msg); 3036 goto out; 3037 } 3038 3039 freelist = B_FALSE; 3040 memory_err = B_FALSE; 3041 3042 out: 3043 if (varray != NULL) { 3044 int v; 3045 3046 for (v = 0; v < vcount; v++) 3047 nvlist_free(varray[v]); 3048 free(varray); 3049 } 3050 zcmd_free_nvlists(&zc); 3051 if (zc_props) 3052 nvlist_free(zc_props); 3053 if (newconfig) 3054 nvlist_free(newconfig); 3055 if (freelist) { 3056 nvlist_free(*newroot); 3057 *newroot = NULL; 3058 } 3059 3060 if (retval != 0) 3061 return (retval); 3062 3063 if (memory_err) 3064 return (no_memory(hdl)); 3065 3066 return (0); 3067 } 3068 3069 /* 3070 * Remove the given device. Currently, this is supported only for hot spares, 3071 * level 2 cache devices, and log devices. 3072 */ 3073 int 3074 zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3075 { 3076 zfs_cmd_t zc = { 0 }; 3077 char msg[1024]; 3078 nvlist_t *tgt; 3079 boolean_t avail_spare, l2cache, islog; 3080 libzfs_handle_t *hdl = zhp->zpool_hdl; 3081 uint64_t version; 3082 3083 (void) snprintf(msg, sizeof (msg), 3084 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3085 3086 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3087 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3088 &islog)) == 0) 3089 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3090 /* 3091 * XXX - this should just go away. 3092 */ 3093 if (!avail_spare && !l2cache && !islog) { 3094 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3095 "only inactive hot spares, cache, top-level, " 3096 "or log devices can be removed")); 3097 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3098 } 3099 3100 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3101 if (islog && version < SPA_VERSION_HOLES) { 3102 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3103 "pool must be upgraded to support log removal")); 3104 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3105 } 3106 3107 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3108 3109 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3110 return (0); 3111 3112 return (zpool_standard_error(hdl, errno, msg)); 3113 } 3114 3115 /* 3116 * Clear the errors for the pool, or the particular device if specified. 3117 */ 3118 int 3119 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3120 { 3121 zfs_cmd_t zc = { 0 }; 3122 char msg[1024]; 3123 nvlist_t *tgt; 3124 zpool_rewind_policy_t policy; 3125 boolean_t avail_spare, l2cache; 3126 libzfs_handle_t *hdl = zhp->zpool_hdl; 3127 nvlist_t *nvi = NULL; 3128 int error; 3129 3130 if (path) 3131 (void) snprintf(msg, sizeof (msg), 3132 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3133 path); 3134 else 3135 (void) snprintf(msg, sizeof (msg), 3136 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3137 zhp->zpool_name); 3138 3139 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3140 if (path) { 3141 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3142 &l2cache, NULL)) == 0) 3143 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3144 3145 /* 3146 * Don't allow error clearing for hot spares.
Do allow 3147 * error clearing for l2cache devices. 3148 */ 3149 if (avail_spare) 3150 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3151 3152 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3153 &zc.zc_guid) == 0); 3154 } 3155 3156 zpool_get_rewind_policy(rewindnvl, &policy); 3157 zc.zc_cookie = policy.zrp_request; 3158 3159 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3160 return (-1); 3161 3162 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 3163 return (-1); 3164 3165 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3166 errno == ENOMEM) { 3167 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3168 zcmd_free_nvlists(&zc); 3169 return (-1); 3170 } 3171 } 3172 3173 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) && 3174 errno != EPERM && errno != EACCES)) { 3175 if (policy.zrp_request & 3176 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3177 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3178 zpool_rewind_exclaim(hdl, zc.zc_name, 3179 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), 3180 nvi); 3181 nvlist_free(nvi); 3182 } 3183 zcmd_free_nvlists(&zc); 3184 return (0); 3185 } 3186 3187 zcmd_free_nvlists(&zc); 3188 return (zpool_standard_error(hdl, errno, msg)); 3189 } 3190 3191 /* 3192 * Similar to zpool_clear(), but takes a GUID (used by fmd). 3193 */ 3194 int 3195 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3196 { 3197 zfs_cmd_t zc = { 0 }; 3198 char msg[1024]; 3199 libzfs_handle_t *hdl = zhp->zpool_hdl; 3200 3201 (void) snprintf(msg, sizeof (msg), 3202 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3203 guid); 3204 3205 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3206 zc.zc_guid = guid; 3207 zc.zc_cookie = ZPOOL_NO_REWIND; 3208 3209 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3210 return (0); 3211 3212 return (zpool_standard_error(hdl, errno, msg)); 3213 } 3214 3215 /* 3216 * Change the GUID for a pool. 3217 */ 3218 int 3219 zpool_reguid(zpool_handle_t *zhp) 3220 { 3221 char msg[1024]; 3222 libzfs_handle_t *hdl = zhp->zpool_hdl; 3223 zfs_cmd_t zc = { 0 }; 3224 3225 (void) snprintf(msg, sizeof (msg), 3226 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3227 3228 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3229 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3230 return (0); 3231 3232 return (zpool_standard_error(hdl, errno, msg)); 3233 } 3234 3235 /* 3236 * Reopen the pool. 3237 */ 3238 int 3239 zpool_reopen(zpool_handle_t *zhp) 3240 { 3241 zfs_cmd_t zc = { 0 }; 3242 char msg[1024]; 3243 libzfs_handle_t *hdl = zhp->zpool_hdl; 3244 3245 (void) snprintf(msg, sizeof (msg), 3246 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3247 zhp->zpool_name); 3248 3249 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3250 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3251 return (0); 3252 return (zpool_standard_error(hdl, errno, msg)); 3253 } 3254 3255 /* 3256 * Convert from a devid string to a path. 
3257 */ 3258 static char * 3259 devid_to_path(char *devid_str) 3260 { 3261 ddi_devid_t devid; 3262 char *minor; 3263 char *path; 3264 devid_nmlist_t *list = NULL; 3265 int ret; 3266 3267 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3268 return (NULL); 3269 3270 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3271 3272 devid_str_free(minor); 3273 devid_free(devid); 3274 3275 if (ret != 0) 3276 return (NULL); 3277 3278 if ((path = strdup(list[0].devname)) == NULL) 3279 return (NULL); 3280 3281 devid_free_nmlist(list); 3282 3283 return (path); 3284 } 3285 3286 /* 3287 * Convert from a path to a devid string. 3288 */ 3289 static char * 3290 path_to_devid(const char *path) 3291 { 3292 int fd; 3293 ddi_devid_t devid; 3294 char *minor, *ret; 3295 3296 if ((fd = open(path, O_RDONLY)) < 0) 3297 return (NULL); 3298 3299 minor = NULL; 3300 ret = NULL; 3301 if (devid_get(fd, &devid) == 0) { 3302 if (devid_get_minor_name(fd, &minor) == 0) 3303 ret = devid_str_encode(devid, minor); 3304 if (minor != NULL) 3305 devid_str_free(minor); 3306 devid_free(devid); 3307 } 3308 (void) close(fd); 3309 3310 return (ret); 3311 } 3312 3313 /* 3314 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3315 * ignore any failure here, since a common case is for an unprivileged user to 3316 * type 'zpool status', and we'll display the correct information anyway. 3317 */ 3318 static void 3319 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3320 { 3321 zfs_cmd_t zc = { 0 }; 3322 3323 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3324 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3325 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3326 &zc.zc_guid) == 0); 3327 3328 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3329 } 3330 3331 /* 3332 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3333 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3334 * We also check if this is a whole disk, in which case we strip off the 3335 * trailing 's0' slice name. 3336 * 3337 * This routine is also responsible for identifying when disks have been 3338 * reconfigured in a new location. The kernel will have opened the device by 3339 * devid, but the path will still refer to the old location. To catch this, we 3340 * first do a path -> devid translation (which is fast for the common case). If 3341 * the devid matches, we're done. If not, we do a reverse devid -> path 3342 * translation and issue the appropriate ioctl() to update the path of the vdev. 3343 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3344 * of these checks. 3345 */ 3346 char * 3347 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3348 boolean_t verbose) 3349 { 3350 char *path, *devid; 3351 uint64_t value; 3352 char buf[64]; 3353 vdev_stat_t *vs; 3354 uint_t vsc; 3355 3356 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 3357 &value) == 0) { 3358 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3359 &value) == 0); 3360 (void) snprintf(buf, sizeof (buf), "%llu", 3361 (u_longlong_t)value); 3362 path = buf; 3363 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 3364 3365 /* 3366 * If the device is dead (faulted, offline, etc) then don't 3367 * bother opening it. Otherwise we may be forcing the user to 3368 * open a misbehaving device, which can have undesirable 3369 * effects. 
3370 */ 3371 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3372 (uint64_t **)&vs, &vsc) != 0 || 3373 vs->vs_state >= VDEV_STATE_DEGRADED) && 3374 zhp != NULL && 3375 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3376 /* 3377 * Determine if the current path is correct. 3378 */ 3379 char *newdevid = path_to_devid(path); 3380 3381 if (newdevid == NULL || 3382 strcmp(devid, newdevid) != 0) { 3383 char *newpath; 3384 3385 if ((newpath = devid_to_path(devid)) != NULL) { 3386 /* 3387 * Update the path appropriately. 3388 */ 3389 set_path(zhp, nv, newpath); 3390 if (nvlist_add_string(nv, 3391 ZPOOL_CONFIG_PATH, newpath) == 0) 3392 verify(nvlist_lookup_string(nv, 3393 ZPOOL_CONFIG_PATH, 3394 &path) == 0); 3395 free(newpath); 3396 } 3397 } 3398 3399 if (newdevid) 3400 devid_str_free(newdevid); 3401 } 3402 3403 if (strncmp(path, "/dev/dsk/", 9) == 0) 3404 path += 9; 3405 3406 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3407 &value) == 0 && value) { 3408 int pathlen = strlen(path); 3409 char *tmp = zfs_strdup(hdl, path); 3410 3411 /* 3412 * If it starts with c#, and ends with "s0", chop 3413 * the "s0" off, or if it ends with "s0/old", remove 3414 * the "s0" from the middle. 3415 */ 3416 if (CTD_CHECK(tmp)) { 3417 if (strcmp(&tmp[pathlen - 2], "s0") == 0) { 3418 tmp[pathlen - 2] = '\0'; 3419 } else if (pathlen > 6 && 3420 strcmp(&tmp[pathlen - 6], "s0/old") == 0) { 3421 (void) strcpy(&tmp[pathlen - 6], 3422 "/old"); 3423 } 3424 } 3425 return (tmp); 3426 } 3427 } else { 3428 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3429 3430 /* 3431 * If it's a raidz device, we need to stick in the parity level. 3432 */ 3433 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3434 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3435 &value) == 0); 3436 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3437 (u_longlong_t)value); 3438 path = buf; 3439 } 3440 3441 /* 3442 * We identify each top-level vdev by using a <type-id> 3443 * naming convention. 3444 */ 3445 if (verbose) { 3446 uint64_t id; 3447 3448 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3449 &id) == 0); 3450 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3451 (u_longlong_t)id); 3452 path = buf; 3453 } 3454 } 3455 3456 return (zfs_strdup(hdl, path)); 3457 } 3458 3459 static int 3460 zbookmark_compare(const void *a, const void *b) 3461 { 3462 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 3463 } 3464 3465 /* 3466 * Retrieve the persistent error log, uniquify the members, and return to the 3467 * caller. 3468 */ 3469 int 3470 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3471 { 3472 zfs_cmd_t zc = { 0 }; 3473 uint64_t count; 3474 zbookmark_phys_t *zb = NULL; 3475 int i; 3476 3477 /* 3478 * Retrieve the raw error list from the kernel. If the number of errors 3479 * has increased, allocate more space and continue until we get the 3480 * entire list. 
3481 */ 3482 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3483 &count) == 0); 3484 if (count == 0) 3485 return (0); 3486 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3487 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL) 3488 return (-1); 3489 zc.zc_nvlist_dst_size = count; 3490 (void) strcpy(zc.zc_name, zhp->zpool_name); 3491 for (;;) { 3492 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3493 &zc) != 0) { 3494 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3495 if (errno == ENOMEM) { 3496 void *dst; 3497 3498 count = zc.zc_nvlist_dst_size; 3499 dst = zfs_alloc(zhp->zpool_hdl, count * 3500 sizeof (zbookmark_phys_t)); 3501 if (dst == NULL) 3502 return (-1); 3503 zc.zc_nvlist_dst = (uintptr_t)dst; 3504 } else { 3505 return (-1); 3506 } 3507 } else { 3508 break; 3509 } 3510 } 3511 3512 /* 3513 * Sort the resulting bookmarks. This is a little confusing due to the 3514 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last 3515 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks 3516 * _not_ copied as part of the process. So we point the start of our 3517 * array appropriately and decrement the total number of elements. 3518 */ 3519 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) + 3520 zc.zc_nvlist_dst_size; 3521 count -= zc.zc_nvlist_dst_size; 3522 3523 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_compare); 3524 3525 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 3526 3527 /* 3528 * Fill in the nverrlistp with nvlists of dataset and object numbers. 3529 */ 3530 for (i = 0; i < count; i++) { 3531 nvlist_t *nv; 3532 3533 /* ignoring zb_blkid and zb_level for now */ 3534 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3535 zb[i-1].zb_object == zb[i].zb_object) 3536 continue; 3537 3538 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3539 goto nomem; 3540 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3541 zb[i].zb_objset) != 0) { 3542 nvlist_free(nv); 3543 goto nomem; 3544 } 3545 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3546 zb[i].zb_object) != 0) { 3547 nvlist_free(nv); 3548 goto nomem; 3549 } 3550 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3551 nvlist_free(nv); 3552 goto nomem; 3553 } 3554 nvlist_free(nv); 3555 } 3556 3557 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3558 return (0); 3559 3560 nomem: 3561 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3562 return (no_memory(zhp->zpool_hdl)); 3563 } 3564 3565 /* 3566 * Upgrade a ZFS pool to the latest on-disk version.
3567 */ 3568 int 3569 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3570 { 3571 zfs_cmd_t zc = { 0 }; 3572 libzfs_handle_t *hdl = zhp->zpool_hdl; 3573 3574 (void) strcpy(zc.zc_name, zhp->zpool_name); 3575 zc.zc_cookie = new_version; 3576 3577 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3578 return (zpool_standard_error_fmt(hdl, errno, 3579 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3580 zhp->zpool_name)); 3581 return (0); 3582 } 3583 3584 void 3585 zfs_save_arguments(int argc, char **argv, char *string, int len) 3586 { 3587 (void) strlcpy(string, basename(argv[0]), len); 3588 for (int i = 1; i < argc; i++) { 3589 (void) strlcat(string, " ", len); 3590 (void) strlcat(string, argv[i], len); 3591 } 3592 } 3593 3594 int 3595 zpool_log_history(libzfs_handle_t *hdl, const char *message) 3596 { 3597 zfs_cmd_t zc = { 0 }; 3598 nvlist_t *args; 3599 int err; 3600 3601 args = fnvlist_alloc(); 3602 fnvlist_add_string(args, "message", message); 3603 err = zcmd_write_src_nvlist(hdl, &zc, args); 3604 if (err == 0) 3605 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc); 3606 nvlist_free(args); 3607 zcmd_free_nvlists(&zc); 3608 return (err); 3609 } 3610 3611 /* 3612 * Perform ioctl to get some command history of a pool. 3613 * 3614 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 3615 * logical offset of the history buffer to start reading from. 3616 * 3617 * Upon return, 'off' is the next logical offset to read from and 3618 * 'len' is the actual amount of bytes read into 'buf'. 3619 */ 3620 static int 3621 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3622 { 3623 zfs_cmd_t zc = { 0 }; 3624 libzfs_handle_t *hdl = zhp->zpool_hdl; 3625 3626 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3627 3628 zc.zc_history = (uint64_t)(uintptr_t)buf; 3629 zc.zc_history_len = *len; 3630 zc.zc_history_offset = *off; 3631 3632 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3633 switch (errno) { 3634 case EPERM: 3635 return (zfs_error_fmt(hdl, EZFS_PERM, 3636 dgettext(TEXT_DOMAIN, 3637 "cannot show history for pool '%s'"), 3638 zhp->zpool_name)); 3639 case ENOENT: 3640 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3641 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3642 "'%s'"), zhp->zpool_name)); 3643 case ENOTSUP: 3644 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3645 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3646 "'%s', pool must be upgraded"), zhp->zpool_name)); 3647 default: 3648 return (zpool_standard_error_fmt(hdl, errno, 3649 dgettext(TEXT_DOMAIN, 3650 "cannot get history for '%s'"), zhp->zpool_name)); 3651 } 3652 } 3653 3654 *len = zc.zc_history_len; 3655 *off = zc.zc_history_offset; 3656 3657 return (0); 3658 } 3659 3660 /* 3661 * Process the buffer of nvlists, unpacking and storing each nvlist record 3662 * into 'records'. 'leftover' is set to the number of bytes that weren't 3663 * processed as there wasn't a complete record. 
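 *
 * Each record in 'buf' is a little-endian 64-bit length followed by a packed
 * nvlist of exactly that many bytes (see the decode loop below), i.e.:
 *
 *	| reclen (8 bytes, LE) | packed nvlist (reclen bytes) | reclen | ...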
3664 */ 3665 int 3666 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3667 nvlist_t ***records, uint_t *numrecords) 3668 { 3669 uint64_t reclen; 3670 nvlist_t *nv; 3671 int i; 3672 3673 while (bytes_read > sizeof (reclen)) { 3674 3675 /* get length of packed record (stored as little endian) */ 3676 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3677 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3678 3679 if (bytes_read < sizeof (reclen) + reclen) 3680 break; 3681 3682 /* unpack record */ 3683 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3684 return (ENOMEM); 3685 bytes_read -= sizeof (reclen) + reclen; 3686 buf += sizeof (reclen) + reclen; 3687 3688 /* add record to nvlist array */ 3689 (*numrecords)++; 3690 if (ISP2(*numrecords + 1)) { 3691 *records = realloc(*records, 3692 *numrecords * 2 * sizeof (nvlist_t *)); 3693 } 3694 (*records)[*numrecords - 1] = nv; 3695 } 3696 3697 *leftover = bytes_read; 3698 return (0); 3699 } 3700 3701 #define HIS_BUF_LEN (128*1024) 3702 3703 /* 3704 * Retrieve the command history of a pool. 3705 */ 3706 int 3707 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3708 { 3709 char buf[HIS_BUF_LEN]; 3710 uint64_t off = 0; 3711 nvlist_t **records = NULL; 3712 uint_t numrecords = 0; 3713 int err, i; 3714 3715 do { 3716 uint64_t bytes_read = sizeof (buf); 3717 uint64_t leftover; 3718 3719 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3720 break; 3721 3722 /* if nothing else was read in, we're at EOF, just return */ 3723 if (!bytes_read) 3724 break; 3725 3726 if ((err = zpool_history_unpack(buf, bytes_read, 3727 &leftover, &records, &numrecords)) != 0) 3728 break; 3729 off -= leftover; 3730 3731 /* CONSTCOND */ 3732 } while (1); 3733 3734 if (!err) { 3735 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3736 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3737 records, numrecords) == 0); 3738 } 3739 for (i = 0; i < numrecords; i++) 3740 nvlist_free(records[i]); 3741 free(records); 3742 3743 return (err); 3744 } 3745 3746 void 3747 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3748 char *pathname, size_t len) 3749 { 3750 zfs_cmd_t zc = { 0 }; 3751 boolean_t mounted = B_FALSE; 3752 char *mntpnt = NULL; 3753 char dsname[MAXNAMELEN]; 3754 3755 if (dsobj == 0) { 3756 /* special case for the MOS */ 3757 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3758 return; 3759 } 3760 3761 /* get the dataset's name */ 3762 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3763 zc.zc_obj = dsobj; 3764 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3765 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3766 /* just write out a path of two object numbers */ 3767 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3768 dsobj, obj); 3769 return; 3770 } 3771 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3772 3773 /* find out if the dataset is mounted */ 3774 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3775 3776 /* get the corrupted object's path */ 3777 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3778 zc.zc_obj = obj; 3779 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3780 &zc) == 0) { 3781 if (mounted) { 3782 (void) snprintf(pathname, len, "%s%s", mntpnt, 3783 zc.zc_value); 3784 } else { 3785 (void) snprintf(pathname, len, "%s:%s", 3786 dsname, zc.zc_value); 3787 } 3788 } else { 3789 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3790 } 3791 free(mntpnt); 3792 } 3793 3794 /* 3795 * Read the EFI label from the 
config, if a label does not exist then 3796 * pass back the error to the caller. If the caller has passed a non-NULL 3797 * diskaddr argument then we set it to the starting address of the EFI 3798 * partition. 3799 */ 3800 static int 3801 read_efi_label(nvlist_t *config, diskaddr_t *sb) 3802 { 3803 char *path; 3804 int fd; 3805 char diskname[MAXPATHLEN]; 3806 int err = -1; 3807 3808 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3809 return (err); 3810 3811 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, 3812 strrchr(path, '/')); 3813 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3814 struct dk_gpt *vtoc; 3815 3816 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3817 if (sb != NULL) 3818 *sb = vtoc->efi_parts[0].p_start; 3819 efi_free(vtoc); 3820 } 3821 (void) close(fd); 3822 } 3823 return (err); 3824 } 3825 3826 /* 3827 * determine where a partition starts on a disk in the current 3828 * configuration 3829 */ 3830 static diskaddr_t 3831 find_start_block(nvlist_t *config) 3832 { 3833 nvlist_t **child; 3834 uint_t c, children; 3835 diskaddr_t sb = MAXOFFSET_T; 3836 uint64_t wholedisk; 3837 3838 if (nvlist_lookup_nvlist_array(config, 3839 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3840 if (nvlist_lookup_uint64(config, 3841 ZPOOL_CONFIG_WHOLE_DISK, 3842 &wholedisk) != 0 || !wholedisk) { 3843 return (MAXOFFSET_T); 3844 } 3845 if (read_efi_label(config, &sb) < 0) 3846 sb = MAXOFFSET_T; 3847 return (sb); 3848 } 3849 3850 for (c = 0; c < children; c++) { 3851 sb = find_start_block(child[c]); 3852 if (sb != MAXOFFSET_T) { 3853 return (sb); 3854 } 3855 } 3856 return (MAXOFFSET_T); 3857 } 3858 3859 /* 3860 * Label an individual disk. The name provided is the short name, 3861 * stripped of any leading /dev path. 3862 */ 3863 int 3864 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name) 3865 { 3866 char path[MAXPATHLEN]; 3867 struct dk_gpt *vtoc; 3868 int fd; 3869 size_t resv = EFI_MIN_RESV_SIZE; 3870 uint64_t slice_size; 3871 diskaddr_t start_block; 3872 char errbuf[1024]; 3873 3874 /* prepare an error message just in case */ 3875 (void) snprintf(errbuf, sizeof (errbuf), 3876 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3877 3878 if (zhp) { 3879 nvlist_t *nvroot; 3880 3881 if (zpool_is_bootable(zhp)) { 3882 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3883 "EFI labeled devices are not supported on root " 3884 "pools.")); 3885 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3886 } 3887 3888 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3889 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3890 3891 if (zhp->zpool_start_block == 0) 3892 start_block = find_start_block(nvroot); 3893 else 3894 start_block = zhp->zpool_start_block; 3895 zhp->zpool_start_block = start_block; 3896 } else { 3897 /* new pool */ 3898 start_block = NEW_START_BLOCK; 3899 } 3900 3901 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, 3902 BACKUP_SLICE); 3903 3904 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3905 /* 3906 * This shouldn't happen. We've long since verified that this 3907 * is a valid device. 
3908 */ 3909 zfs_error_aux(hdl, 3910 dgettext(TEXT_DOMAIN, "unable to open device")); 3911 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 3912 } 3913 3914 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 3915 /* 3916 * The only way this can fail is if we run out of memory, or we 3917 * were unable to read the disk's capacity 3918 */ 3919 if (errno == ENOMEM) 3920 (void) no_memory(hdl); 3921 3922 (void) close(fd); 3923 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3924 "unable to read disk capacity"), name); 3925 3926 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 3927 } 3928 3929 slice_size = vtoc->efi_last_u_lba + 1; 3930 slice_size -= EFI_MIN_RESV_SIZE; 3931 if (start_block == MAXOFFSET_T) 3932 start_block = NEW_START_BLOCK; 3933 slice_size -= start_block; 3934 3935 vtoc->efi_parts[0].p_start = start_block; 3936 vtoc->efi_parts[0].p_size = slice_size; 3937 3938 /* 3939 * Why we use V_USR: V_BACKUP confuses users, and is considered 3940 * disposable by some EFI utilities (since EFI doesn't have a backup 3941 * slice). V_UNASSIGNED is supposed to be used only for zero size 3942 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 3943 * etc. were all pretty specific. V_USR is as close to reality as we 3944 * can get, in the absence of V_OTHER. 3945 */ 3946 vtoc->efi_parts[0].p_tag = V_USR; 3947 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 3948 3949 vtoc->efi_parts[8].p_start = slice_size + start_block; 3950 vtoc->efi_parts[8].p_size = resv; 3951 vtoc->efi_parts[8].p_tag = V_RESERVED; 3952 3953 if (efi_write(fd, vtoc) != 0) { 3954 /* 3955 * Some block drivers (like pcata) may not support EFI 3956 * GPT labels. Print out a helpful error message dir- 3957 * ecting the user to manually label the disk and give 3958 * a specific slice. 3959 */ 3960 (void) close(fd); 3961 efi_free(vtoc); 3962 3963 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3964 "try using fdisk(1M) and then provide a specific slice")); 3965 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 3966 } 3967 3968 (void) close(fd); 3969 efi_free(vtoc); 3970 return (0); 3971 } 3972 3973 static boolean_t 3974 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 3975 { 3976 char *type; 3977 nvlist_t **child; 3978 uint_t children, c; 3979 3980 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 3981 if (strcmp(type, VDEV_TYPE_FILE) == 0 || 3982 strcmp(type, VDEV_TYPE_HOLE) == 0 || 3983 strcmp(type, VDEV_TYPE_MISSING) == 0) { 3984 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3985 "vdev type '%s' is not supported"), type); 3986 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 3987 return (B_FALSE); 3988 } 3989 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 3990 &child, &children) == 0) { 3991 for (c = 0; c < children; c++) { 3992 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 3993 return (B_FALSE); 3994 } 3995 } 3996 return (B_TRUE); 3997 } 3998 3999 /* 4000 * Check if this zvol is allowable for use as a dump device; zero if 4001 * it is, > 0 if it isn't, < 0 if it isn't a zvol. 4002 * 4003 * Allowable storage configurations include mirrors, all raidz variants, and 4004 * pools with log, cache, and spare devices. Pools which are backed by files or 4005 * have missing/hole vdevs are not suitable. 
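 *
 * The argument must be a full zvol device path beginning with
 * ZVOL_FULL_DEV_DIR (e.g. "/dev/zvol/dsk/<pool>/<volume>", illustrative
 * form); anything else is rejected with -1.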
4006 */ 4007 int 4008 zvol_check_dump_config(char *arg) 4009 { 4010 zpool_handle_t *zhp = NULL; 4011 nvlist_t *config, *nvroot; 4012 char *p, *volname; 4013 nvlist_t **top; 4014 uint_t toplevels; 4015 libzfs_handle_t *hdl; 4016 char errbuf[1024]; 4017 char poolname[ZPOOL_MAXNAMELEN]; 4018 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4019 int ret = 1; 4020 4021 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4022 return (-1); 4023 } 4024 4025 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4026 "dump is not supported on device '%s'"), arg); 4027 4028 if ((hdl = libzfs_init()) == NULL) 4029 return (1); 4030 libzfs_print_on_error(hdl, B_TRUE); 4031 4032 volname = arg + pathlen; 4033 4034 /* check the configuration of the pool */ 4035 if ((p = strchr(volname, '/')) == NULL) { 4036 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4037 "malformed dataset name")); 4038 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4039 return (1); 4040 } else if (p - volname >= ZFS_MAXNAMELEN) { 4041 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4042 "dataset name is too long")); 4043 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4044 return (1); 4045 } else { 4046 (void) strncpy(poolname, volname, p - volname); 4047 poolname[p - volname] = '\0'; 4048 } 4049 4050 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4051 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4052 "could not open pool '%s'"), poolname); 4053 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4054 goto out; 4055 } 4056 config = zpool_get_config(zhp, NULL); 4057 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4058 &nvroot) != 0) { 4059 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4060 "could not obtain vdev configuration for '%s'"), poolname); 4061 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4062 goto out; 4063 } 4064 4065 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4066 &top, &toplevels) == 0); 4067 4068 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4069 goto out; 4070 } 4071 ret = 0; 4072 4073 out: 4074 if (zhp) 4075 zpool_close(zhp); 4076 libzfs_fini(hdl); 4077 return (ret); 4078 } 4079
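/*
 * Illustrative sketch only -- not part of libzfs and not compiled (hence the
 * "#if 0"). It shows how a consumer of this library might use the routines
 * above to locate a device by name and bring it online with expansion. The
 * function name and the pool/device arguments are placeholders, and error
 * handling is abbreviated.
 */
#if 0
static int
example_online_and_expand(const char *poolname, const char *devname)
{
	libzfs_handle_t *hdl;
	zpool_handle_t *zhp = NULL;
	boolean_t avail_spare, l2cache, islog;
	vdev_state_t newstate;
	int ret = -1;

	if ((hdl = libzfs_init()) == NULL)
		return (-1);
	libzfs_print_on_error(hdl, B_TRUE);

	if ((zhp = zpool_open(hdl, poolname)) == NULL)
		goto out;

	/* Resolve the user-visible device name to its config nvlist. */
	if (zpool_find_vdev(zhp, devname, &avail_spare, &l2cache,
	    &islog) == NULL)
		goto out;

	/* Bring the device online and pick up any newly added capacity. */
	ret = zpool_vdev_online(zhp, devname, ZFS_ONLINE_EXPAND, &newstate);

out:
	if (zhp != NULL)
		zpool_close(zhp);
	libzfs_fini(hdl);
	return (ret);
}
#endif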