/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#define DISK_ROOT       "/dev/dsk"
#define RDISK_ROOT      "/dev/rdsk"
#define BACKUP_SLICE    "s2"

typedef struct prop_flags {
        int create:1;   /* Validate property on creation */
        int import:1;   /* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 *      zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
        zfs_cmd_t zc = { 0 };
        libzfs_handle_t *hdl = zhp->zpool_hdl;

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
                return (-1);

        while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
                if (errno == ENOMEM) {
                        if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                                zcmd_free_nvlists(&zc);
                                return (-1);
                        }
                } else {
                        zcmd_free_nvlists(&zc);
                        return (-1);
                }
        }

        if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
                zcmd_free_nvlists(&zc);
                return (-1);
        }

        zcmd_free_nvlists(&zc);

        return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
        nvlist_t *old_props;

        old_props = zhp->zpool_props;

        if (zpool_get_all_props(zhp) != 0)
                return (-1);

        nvlist_free(old_props);
        return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
        nvlist_t *nv, *nvl;
        uint64_t ival;
        char *value;
        zprop_source_t source;

        nvl = zhp->zpool_props;
        if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
                verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
                source = ival;
                verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
        } else {
                source = ZPROP_SRC_DEFAULT;
                if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
                        value = "-";
        }

        if (src)
                *src = source;

        return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
        nvlist_t *nv, *nvl;
        uint64_t value;
        zprop_source_t source;

        if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
                /*
                 * zpool_get_all_props() has most likely failed because
                 * the pool is faulted, but if all we need is the top level
                 * vdev's guid then get it from the zhp config nvlist.
                 */
                if ((prop == ZPOOL_PROP_GUID) &&
                    (nvlist_lookup_nvlist(zhp->zpool_config,
                    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
                    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
                    == 0)) {
                        return (value);
                }
                return (zpool_prop_default_numeric(prop));
        }

        nvl = zhp->zpool_props;
        if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
                verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
                source = value;
                verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
        } else {
                source = ZPROP_SRC_DEFAULT;
                value = zpool_prop_default_numeric(prop);
        }

        if (src)
                *src = source;

        return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
        switch (state) {
        case VDEV_STATE_CLOSED:
        case VDEV_STATE_OFFLINE:
                return (gettext("OFFLINE"));
        case VDEV_STATE_REMOVED:
                return (gettext("REMOVED"));
        case VDEV_STATE_CANT_OPEN:
                if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
                        return (gettext("FAULTED"));
                else if (aux == VDEV_AUX_SPLIT_POOL)
                        return (gettext("SPLIT"));
                else
                        return (gettext("UNAVAIL"));
        case VDEV_STATE_FAULTED:
                return (gettext("FAULTED"));
        case VDEV_STATE_DEGRADED:
                return (gettext("DEGRADED"));
        case VDEV_STATE_HEALTHY:
                return (gettext("ONLINE"));
        }

        return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
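 *
 * For example, a caller might fetch the pool's capacity as a formatted
 * string (a usage sketch only, not part of the original source):
 *
 *	char buf[ZPOOL_MAXPROPLEN];
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("capacity: %s\n", buf);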
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype, boolean_t literal)
{
        uint64_t intval;
        const char *strval;
        zprop_source_t src = ZPROP_SRC_NONE;
        nvlist_t *nvroot;
        vdev_stat_t *vs;
        uint_t vsc;

        if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
                switch (prop) {
                case ZPOOL_PROP_NAME:
                        (void) strlcpy(buf, zpool_get_name(zhp), len);
                        break;

                case ZPOOL_PROP_HEALTH:
                        (void) strlcpy(buf, "FAULTED", len);
                        break;

                case ZPOOL_PROP_GUID:
                        intval = zpool_get_prop_int(zhp, prop, &src);
                        (void) snprintf(buf, len, "%llu", intval);
                        break;

                case ZPOOL_PROP_ALTROOT:
                case ZPOOL_PROP_CACHEFILE:
                case ZPOOL_PROP_COMMENT:
                        if (zhp->zpool_props != NULL ||
                            zpool_get_all_props(zhp) == 0) {
                                (void) strlcpy(buf,
                                    zpool_get_prop_string(zhp, prop, &src),
                                    len);
                                break;
                        }
                        /* FALLTHROUGH */
                default:
                        (void) strlcpy(buf, "-", len);
                        break;
                }

                if (srctype != NULL)
                        *srctype = src;
                return (0);
        }

        if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
            prop != ZPOOL_PROP_NAME)
                return (-1);

        switch (zpool_prop_get_type(prop)) {
        case PROP_TYPE_STRING:
                (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
                    len);
                break;

        case PROP_TYPE_NUMBER:
                intval = zpool_get_prop_int(zhp, prop, &src);

                switch (prop) {
                case ZPOOL_PROP_SIZE:
                case ZPOOL_PROP_ALLOCATED:
                case ZPOOL_PROP_FREE:
                case ZPOOL_PROP_FREEING:
                case ZPOOL_PROP_LEAKED:
                        if (literal) {
                                (void) snprintf(buf, len, "%llu",
                                    (u_longlong_t)intval);
                        } else {
                                (void) zfs_nicenum(intval, buf, len);
                        }
                        break;
                case ZPOOL_PROP_EXPANDSZ:
                        if (intval == 0) {
                                (void) strlcpy(buf, "-", len);
                        } else if (literal) {
                                (void) snprintf(buf, len, "%llu",
                                    (u_longlong_t)intval);
                        } else {
                                (void) zfs_nicenum(intval, buf, len);
                        }
                        break;
                case ZPOOL_PROP_CAPACITY:
                        if (literal) {
                                (void) snprintf(buf, len, "%llu",
                                    (u_longlong_t)intval);
                        } else {
                                (void) snprintf(buf, len, "%llu%%",
                                    (u_longlong_t)intval);
                        }
                        break;
                case ZPOOL_PROP_FRAGMENTATION:
                        if (intval == UINT64_MAX) {
                                (void) strlcpy(buf, "-", len);
                        } else {
                                (void) snprintf(buf, len, "%llu%%",
                                    (u_longlong_t)intval);
                        }
                        break;
                case ZPOOL_PROP_DEDUPRATIO:
                        (void) snprintf(buf, len, "%llu.%02llux",
                            (u_longlong_t)(intval / 100),
                            (u_longlong_t)(intval % 100));
                        break;
                case ZPOOL_PROP_HEALTH:
                        verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
                        verify(nvlist_lookup_uint64_array(nvroot,
                            ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
                            == 0);

                        (void) strlcpy(buf, zpool_state_to_name(intval,
                            vs->vs_aux), len);
                        break;
                case ZPOOL_PROP_VERSION:
                        if (intval >= SPA_VERSION_FEATURES) {
                                (void) snprintf(buf, len, "-");
                                break;
                        }
                        /* FALLTHROUGH */
                default:
                        (void) snprintf(buf, len, "%llu", intval);
                }
                break;

        case PROP_TYPE_INDEX:
                intval = zpool_get_prop_int(zhp, prop, &src);
                if (zpool_prop_index_to_string(prop, intval, &strval)
                    != 0)
                        return (-1);
                (void) strlcpy(buf, strval, len);
                break;

        default:
                abort();
        }

        if (srctype)
                *srctype = src;

        return (0);
}

/*
 * Check if the bootfs name has the same pool name as it is set to.
 * Assuming bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
        int len = strlen(pool);

        if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
                return (B_FALSE);

        if (strncmp(pool, bootfs, len) == 0 &&
            (bootfs[len] == '/' || bootfs[len] == '\0'))
                return (B_TRUE);

        return (B_FALSE);
}

/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
        nvlist_t **child;
        uint_t c, children;

        if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0)
                return (read_efi_label(config, NULL) >= 0);

        for (c = 0; c < children; c++) {
                if (pool_uses_efi(child[c]))
                        return (B_TRUE);
        }
        return (B_FALSE);
}

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
        char bootfs[ZPOOL_MAXNAMELEN];

        return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
            sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
            sizeof (bootfs)) != 0);
}


/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
        nvpair_t *elem;
        nvlist_t *retprops;
        zpool_prop_t prop;
        char *strval;
        uint64_t intval;
        char *slash, *check;
        struct stat64 statbuf;
        zpool_handle_t *zhp;
        nvlist_t *nvroot;

        if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
                (void) no_memory(hdl);
                return (NULL);
        }

        elem = NULL;
        while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
                const char *propname = nvpair_name(elem);

                prop = zpool_name_to_prop(propname);
                if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
                        int err;
                        char *fname = strchr(propname, '@') + 1;

                        err = zfeature_lookup_name(fname, NULL);
                        if (err != 0) {
                                ASSERT3U(err, ==, ENOENT);
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "invalid feature '%s'"), fname);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }

                        if (nvpair_type(elem) != DATA_TYPE_STRING) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "'%s' must be a string"), propname);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }

                        (void) nvpair_value_string(elem, &strval);
                        if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' can only be set to "
                                    "'enabled'"), propname);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }

                        if (nvlist_add_uint64(retprops, propname, 0) != 0) {
                                (void) no_memory(hdl);
                                goto error;
                        }
                        continue;
                }

                /*
                 * Make sure this property is valid and applies to this type.
                 */
                if (prop == ZPROP_INVAL) {
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "invalid property '%s'"), propname);
                        (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                        goto error;
                }

                if (zpool_prop_readonly(prop)) {
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
                            "is readonly"), propname);
                        (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
                        goto error;
                }

                if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
                    &strval, &intval, errbuf) != 0)
                        goto error;

                /*
                 * Perform additional checking for specific properties.
                 */
                switch (prop) {
                case ZPOOL_PROP_VERSION:
                        if (intval < version ||
                            !SPA_VERSION_IS_SUPPORTED(intval)) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' number %d is invalid."),
                                    propname, intval);
                                (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
                                goto error;
                        }
                        break;

                case ZPOOL_PROP_BOOTFS:
                        if (flags.create || flags.import) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' cannot be set at creation "
                                    "or import time"), propname);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }

                        if (version < SPA_VERSION_BOOTFS) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "pool must be upgraded to support "
                                    "'%s' property"), propname);
                                (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
                                goto error;
                        }

                        /*
                         * bootfs property value has to be a dataset name and
                         * the dataset has to be in the same pool it is set on.
                         */
                        if (strval[0] != '\0' && !bootfs_name_valid(poolname,
                            strval)) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
                                    "is an invalid name"), strval);
                                (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
                                goto error;
                        }

                        if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "could not open pool '%s'"), poolname);
                                (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
                                goto error;
                        }
                        verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

                        /*
                         * bootfs property cannot be set on a disk which has
                         * been EFI labeled.
                         */
                        if (pool_uses_efi(nvroot)) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' not supported on "
                                    "EFI labeled devices"), propname);
                                (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
                                zpool_close(zhp);
                                goto error;
                        }
                        zpool_close(zhp);
                        break;

                case ZPOOL_PROP_ALTROOT:
                        if (!flags.create && !flags.import) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' can only be set during pool "
                                    "creation or import"), propname);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }

                        if (strval[0] != '/') {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "bad alternate root '%s'"), strval);
                                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                                goto error;
                        }
                        break;

                case ZPOOL_PROP_CACHEFILE:
                        if (strval[0] == '\0')
                                break;

                        if (strcmp(strval, "none") == 0)
                                break;

                        if (strval[0] != '/') {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' must be empty, an "
                                    "absolute path, or 'none'"), propname);
                                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                                goto error;
                        }

                        slash = strrchr(strval, '/');

                        if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
                            strcmp(slash, "/..") == 0) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "'%s' is not a valid file"), strval);
                                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                                goto error;
                        }

                        *slash = '\0';

                        if (strval[0] != '\0' &&
                            (stat64(strval, &statbuf) != 0 ||
                            !S_ISDIR(statbuf.st_mode))) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "'%s' is not a valid directory"),
                                    strval);
                                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                                goto error;
                        }

                        *slash = '/';
                        break;

                case ZPOOL_PROP_COMMENT:
                        for (check = strval; *check != '\0'; check++) {
                                if (!isprint(*check)) {
                                        zfs_error_aux(hdl,
                                            dgettext(TEXT_DOMAIN,
                                            "comment may only have printable "
                                            "characters"));
                                        (void) zfs_error(hdl, EZFS_BADPROP,
                                            errbuf);
                                        goto error;
                                }
                        }
                        if (strlen(strval) > ZPROP_MAX_COMMENT) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "comment must not exceed %d characters"),
                                    ZPROP_MAX_COMMENT);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }
                        break;
                case ZPOOL_PROP_READONLY:
                        if (!flags.import) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' can only be set at "
                                    "import time"), propname);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }
                        break;
                }
        }

        return (retprops);
error:
        nvlist_free(retprops);
        return (NULL);
}

/*
 * Set zpool property : propname=propval.
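 *
 * A minimal usage sketch (illustrative only, not part of the original
 * source); the pool name "tank" and the comment text are just examples:
 *
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *	if (zhp != NULL && zpool_set_prop(zhp, "comment", "scratch pool") == 0)
 *		(void) printf("comment updated\n");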
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
        zfs_cmd_t zc = { 0 };
        int ret = -1;
        char errbuf[1024];
        nvlist_t *nvl = NULL;
        nvlist_t *realprops;
        uint64_t version;
        prop_flags_t flags = { 0 };

        (void) snprintf(errbuf, sizeof (errbuf),
            dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
            zhp->zpool_name);

        if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
                return (no_memory(zhp->zpool_hdl));

        if (nvlist_add_string(nvl, propname, propval) != 0) {
                nvlist_free(nvl);
                return (no_memory(zhp->zpool_hdl));
        }

        version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
        if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
            zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
                nvlist_free(nvl);
                return (-1);
        }

        nvlist_free(nvl);
        nvl = realprops;

        /*
         * Execute the corresponding ioctl() to set this property.
         */
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
                nvlist_free(nvl);
                return (-1);
        }

        ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

        zcmd_free_nvlists(&zc);
        nvlist_free(nvl);

        if (ret)
                (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
        else
                (void) zpool_props_refresh(zhp);

        return (ret);
}

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
        libzfs_handle_t *hdl = zhp->zpool_hdl;
        zprop_list_t *entry;
        char buf[ZFS_MAXPROPLEN];
        nvlist_t *features = NULL;
        zprop_list_t **last;
        boolean_t firstexpand = (NULL == *plp);

        if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
                return (-1);

        last = plp;
        while (*last != NULL)
                last = &(*last)->pl_next;

        if ((*plp)->pl_all)
                features = zpool_get_features(zhp);

        if ((*plp)->pl_all && firstexpand) {
                for (int i = 0; i < SPA_FEATURES; i++) {
                        zprop_list_t *entry = zfs_alloc(hdl,
                            sizeof (zprop_list_t));
                        entry->pl_prop = ZPROP_INVAL;
                        entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
                            spa_feature_table[i].fi_uname);
                        entry->pl_width = strlen(entry->pl_user_prop);
                        entry->pl_all = B_TRUE;

                        *last = entry;
                        last = &entry->pl_next;
                }
        }

        /* add any unsupported features */
        for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
            nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
                char *propname;
                boolean_t found;
                zprop_list_t *entry;

                if (zfeature_is_supported(nvpair_name(nvp)))
                        continue;

                propname = zfs_asprintf(hdl, "unsupported@%s",
                    nvpair_name(nvp));

                /*
                 * Before adding the property to the list make sure that no
                 * other pool already added the same property.
                 */
                found = B_FALSE;
                entry = *plp;
                while (entry != NULL) {
                        if (entry->pl_user_prop != NULL &&
                            strcmp(propname, entry->pl_user_prop) == 0) {
                                found = B_TRUE;
                                break;
                        }
                        entry = entry->pl_next;
                }
                if (found) {
                        free(propname);
                        continue;
                }

                entry = zfs_alloc(hdl, sizeof (zprop_list_t));
                entry->pl_prop = ZPROP_INVAL;
                entry->pl_user_prop = propname;
                entry->pl_width = strlen(entry->pl_user_prop);
                entry->pl_all = B_TRUE;

                *last = entry;
                last = &entry->pl_next;
        }

        for (entry = *plp; entry != NULL; entry = entry->pl_next) {

                if (entry->pl_fixed)
                        continue;

                if (entry->pl_prop != ZPROP_INVAL &&
                    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
                    NULL, B_FALSE) == 0) {
                        if (strlen(buf) > entry->pl_width)
                                entry->pl_width = strlen(buf);
                }
        }

        return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
        uint64_t refcount;
        boolean_t found = B_FALSE;
        nvlist_t *features = zpool_get_features(zhp);
        boolean_t supported;
        const char *feature = strchr(propname, '@') + 1;

        supported = zpool_prop_feature(propname);
        ASSERT(supported || zfs_prop_unsupported(propname));

        /*
         * Convert from feature name to feature guid. This conversion is
         * unnecessary for unsupported@... properties because they already
         * use guids.
         */
        if (supported) {
                int ret;
                spa_feature_t fid;

                ret = zfeature_lookup_name(feature, &fid);
                if (ret != 0) {
                        (void) strlcpy(buf, "-", len);
                        return (ENOTSUP);
                }
                feature = spa_feature_table[fid].fi_guid;
        }

        if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
                found = B_TRUE;

        if (supported) {
                if (!found) {
                        (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
                } else {
                        if (refcount == 0)
                                (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
                        else
                                (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
                }
        } else {
                if (found) {
                        if (refcount == 0) {
                                (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
                        } else {
                                (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
                        }
                } else {
                        (void) strlcpy(buf, "-", len);
                        return (ENOTSUP);
                }
        }

        return (0);
}

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define NEW_START_BLOCK 256

/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
        namecheck_err_t why;
        char what;
        int ret;

        ret = pool_namecheck(pool, &why, &what);

        /*
         * The rules for reserved pool names were extended at a later point.
         * But we need to support users with existing pools that may now be
         * invalid. So we only check for this expanded set of names during a
         * create (or import), and only in userland.
         */
        if (ret == 0 && !isopen &&
            (strncmp(pool, "mirror", 6) == 0 ||
            strncmp(pool, "raidz", 5) == 0 ||
            strncmp(pool, "spare", 5) == 0 ||
            strcmp(pool, "log") == 0)) {
                if (hdl != NULL)
                        zfs_error_aux(hdl,
                            dgettext(TEXT_DOMAIN, "name is reserved"));
                return (B_FALSE);
        }


        if (ret != 0) {
                if (hdl != NULL) {
                        switch (why) {
                        case NAME_ERR_TOOLONG:
                                zfs_error_aux(hdl,
                                    dgettext(TEXT_DOMAIN, "name is too long"));
                                break;

                        case NAME_ERR_INVALCHAR:
                                zfs_error_aux(hdl,
                                    dgettext(TEXT_DOMAIN, "invalid character "
                                    "'%c' in pool name"), what);
                                break;

                        case NAME_ERR_NOLETTER:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "name must begin with a letter"));
                                break;

                        case NAME_ERR_RESERVED:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "name is reserved"));
                                break;

                        case NAME_ERR_DISKLIKE:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "pool name is reserved"));
                                break;

                        case NAME_ERR_LEADING_SLASH:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "leading slash in name"));
                                break;

                        case NAME_ERR_EMPTY_COMPONENT:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "empty component in name"));
                                break;

                        case NAME_ERR_TRAILING_SLASH:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "trailing slash in name"));
                                break;

                        case NAME_ERR_MULTIPLE_AT:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "multiple '@' delimiters in name"));
                                break;

                        }
                }
                return (B_FALSE);
        }

        return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
        zpool_handle_t *zhp;
        boolean_t missing;

        /*
         * Make sure the pool name is valid.
         */
        if (!zpool_name_valid(hdl, B_TRUE, pool)) {
                (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
                    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                    pool);
                return (NULL);
        }

        if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
                return (NULL);

        zhp->zpool_hdl = hdl;
        (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

        if (zpool_refresh_stats(zhp, &missing) != 0) {
                zpool_close(zhp);
                return (NULL);
        }

        if (missing) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
                (void) zfs_error_fmt(hdl, EZFS_NOENT,
                    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
                zpool_close(zhp);
                return (NULL);
        }

        return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
        zpool_handle_t *zhp;
        boolean_t missing;

        if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
                return (-1);

        zhp->zpool_hdl = hdl;
        (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

        if (zpool_refresh_stats(zhp, &missing) != 0) {
                zpool_close(zhp);
                return (-1);
        }

        if (missing) {
                zpool_close(zhp);
                *ret = NULL;
                return (0);
        }

        *ret = zhp;
        return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
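 *
 * Typical callers open a pool, work with it, and close the handle when done
 * (an illustrative sketch only; the pool name "tank" is an example):
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *		(void) printf("state: %d\n", zpool_get_state(zhp));
 *		zpool_close(zhp);
 *	}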
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
        zpool_handle_t *zhp;

        if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
                return (NULL);

        if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
                (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
                    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
                zpool_close(zhp);
                return (NULL);
        }

        return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
        if (zhp->zpool_config)
                nvlist_free(zhp->zpool_config);
        if (zhp->zpool_old_config)
                nvlist_free(zhp->zpool_old_config);
        if (zhp->zpool_props)
                nvlist_free(zhp->zpool_props);
        free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
        return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
        return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
        zfs_cmd_t zc = { 0 };
        nvlist_t *zc_fsprops = NULL;
        nvlist_t *zc_props = NULL;
        char msg[1024];
        int ret = -1;

        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot create '%s'"), pool);

        if (!zpool_name_valid(hdl, B_FALSE, pool))
                return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

        if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
                return (-1);

        if (props) {
                prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

                if ((zc_props = zpool_valid_proplist(hdl, pool, props,
                    SPA_VERSION_1, flags, msg)) == NULL) {
                        goto create_failed;
                }
        }

        if (fsprops) {
                uint64_t zoned;
                char *zonestr;

                zoned = ((nvlist_lookup_string(fsprops,
                    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
                    strcmp(zonestr, "on") == 0);

                if ((zc_fsprops = zfs_valid_proplist(hdl,
                    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
                        goto create_failed;
                }
                if (!zc_props &&
                    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
                        goto create_failed;
                }
                if (nvlist_add_nvlist(zc_props,
                    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
                        goto create_failed;
                }
        }

        if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
                goto create_failed;

        (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

        if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

                zcmd_free_nvlists(&zc);
                nvlist_free(zc_props);
                nvlist_free(zc_fsprops);

                switch (errno) {
                case EBUSY:
                        /*
                         * This can happen if the user has specified the same
                         * device multiple times. We can't reliably detect this
                         * until we try to add it and see we already have a
                         * label.
                         */
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "one or more vdevs refer to the same device"));
                        return (zfs_error(hdl, EZFS_BADDEV, msg));

                case EOVERFLOW:
                        /*
                         * This occurs when one of the devices is below
                         * SPA_MINDEVSIZE. Unfortunately, we can't detect which
                         * device was the problem device since there's no
                         * reliable way to determine device size from userland.
                         */
                        {
                                char buf[64];

                                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "one or more devices is less than the "
                                    "minimum size (%s)"), buf);
                        }
                        return (zfs_error(hdl, EZFS_BADDEV, msg));

                case ENOSPC:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "one or more devices is out of space"));
                        return (zfs_error(hdl, EZFS_BADDEV, msg));

                case ENOTBLK:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "cache device must be a disk or disk slice"));
                        return (zfs_error(hdl, EZFS_BADDEV, msg));

                default:
                        return (zpool_standard_error(hdl, errno, msg));
                }
        }

create_failed:
        zcmd_free_nvlists(&zc);
        nvlist_free(zc_props);
        nvlist_free(zc_fsprops);
        return (ret);
}

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
        zfs_cmd_t zc = { 0 };
        zfs_handle_t *zfp = NULL;
        libzfs_handle_t *hdl = zhp->zpool_hdl;
        char msg[1024];

        if (zhp->zpool_state == POOL_STATE_ACTIVE &&
            (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
                return (-1);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        zc.zc_history = (uint64_t)(uintptr_t)log_str;

        if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
                (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
                    "cannot destroy '%s'"), zhp->zpool_name);

                if (errno == EROFS) {
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "one or more devices is read only"));
                        (void) zfs_error(hdl, EZFS_BADDEV, msg);
                } else {
                        (void) zpool_standard_error(hdl, errno, msg);
                }

                if (zfp)
                        zfs_close(zfp);
                return (-1);
        }

        if (zfp) {
                remove_mountpoint(zfp);
                zfs_close(zfp);
        }

        return (0);
}

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
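 *
 * A hedged usage sketch (not part of the original source): 'nvroot' would
 * typically be built by the caller's vdev-parsing code before being handed
 * to this function:
 *
 *	if (zpool_add(zhp, nvroot) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));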
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
        zfs_cmd_t zc = { 0 };
        int ret;
        libzfs_handle_t *hdl = zhp->zpool_hdl;
        char msg[1024];
        nvlist_t **spares, **l2cache;
        uint_t nspares, nl2cache;

        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot add to '%s'"), zhp->zpool_name);

        if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
            SPA_VERSION_SPARES &&
            nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
            &spares, &nspares) == 0) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
                    "upgraded to add hot spares"));
                return (zfs_error(hdl, EZFS_BADVERSION, msg));
        }

        if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
            ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
                uint64_t s;

                for (s = 0; s < nspares; s++) {
                        char *path;

                        if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
                            &path) == 0 && pool_uses_efi(spares[s])) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "device '%s' contains an EFI label and "
                                    "cannot be used on root pools."),
                                    zpool_vdev_name(hdl, NULL, spares[s],
                                    B_FALSE));
                                return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
                        }
                }
        }

        if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
            SPA_VERSION_L2CACHE &&
            nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
            &l2cache, &nl2cache) == 0) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
                    "upgraded to add cache devices"));
                return (zfs_error(hdl, EZFS_BADVERSION, msg));
        }

        if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
                return (-1);
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
                switch (errno) {
                case EBUSY:
                        /*
                         * This can happen if the user has specified the same
                         * device multiple times. We can't reliably detect this
                         * until we try to add it and see we already have a
                         * label.
                         */
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "one or more vdevs refer to the same device"));
                        (void) zfs_error(hdl, EZFS_BADDEV, msg);
                        break;

                case EOVERFLOW:
                        /*
                         * This occurs when one of the devices is below
                         * SPA_MINDEVSIZE. Unfortunately, we can't detect which
                         * device was the problem device since there's no
                         * reliable way to determine device size from userland.
                         */
                        {
                                char buf[64];

                                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "device is less than the minimum "
                                    "size (%s)"), buf);
                        }
                        (void) zfs_error(hdl, EZFS_BADDEV, msg);
                        break;

                case ENOTSUP:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "pool must be upgraded to add these vdevs"));
                        (void) zfs_error(hdl, EZFS_BADVERSION, msg);
                        break;

                case EDOM:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "root pool can not have multiple vdevs"
                            " or separate logs"));
                        (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
                        break;

                case ENOTBLK:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "cache device must be a disk or disk slice"));
                        (void) zfs_error(hdl, EZFS_BADDEV, msg);
                        break;

                default:
                        (void) zpool_standard_error(hdl, errno, msg);
                }

                ret = -1;
        } else {
                ret = 0;
        }

        zcmd_free_nvlists(&zc);

        return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
        zfs_cmd_t zc = { 0 };
        char msg[1024];

        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot export '%s'"), zhp->zpool_name);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        zc.zc_cookie = force;
        zc.zc_guid = hardforce;
        zc.zc_history = (uint64_t)(uintptr_t)log_str;

        if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
                switch (errno) {
                case EXDEV:
                        zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
                            "use '-f' to override the following errors:\n"
                            "'%s' has an active shared spare which could be"
                            " used by other pools once '%s' is exported."),
                            zhp->zpool_name, zhp->zpool_name);
                        return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
                            msg));
                default:
                        return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
                            msg));
                }
        }

        return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
        return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
        return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
        nvlist_t *nv = NULL;
        uint64_t rewindto;
        int64_t loss = -1;
        struct tm t;
        char timestr[128];

        if (!hdl->libzfs_printerr || config == NULL)
                return;

        if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
            nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
                return;
        }

        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
                return;
        (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

        if (localtime_r((time_t *)&rewindto, &t) != NULL &&
            strftime(timestr, 128, 0, &t) != 0) {
                if (dryrun) {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "Would be able to return %s "
                            "to its state as of %s.\n"),
                            name, timestr);
                } else {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "Pool %s returned to its state as of %s.\n"),
                            name, timestr);
                }
                if (loss > 120) {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "%s approximately %lld "),
                            dryrun ? "Would discard" : "Discarded",
                            (loss + 30) / 60);
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "minutes of transactions.\n"));
                } else if (loss > 0) {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "%s approximately %lld "),
                            dryrun ? "Would discard" : "Discarded", loss);
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "seconds of transactions.\n"));
                }
        }
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
        nvlist_t *nv = NULL;
        int64_t loss = -1;
        uint64_t edata = UINT64_MAX;
        uint64_t rewindto;
        struct tm t;
        char timestr[128];

        if (!hdl->libzfs_printerr)
                return;

        if (reason >= 0)
                (void) printf(dgettext(TEXT_DOMAIN, "action: "));
        else
                (void) printf(dgettext(TEXT_DOMAIN, "\t"));

        /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
        if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
            nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
            nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
                goto no_info;

        (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
            &edata);

        (void) printf(dgettext(TEXT_DOMAIN,
            "Recovery is possible, but will result in some data loss.\n"));

        if (localtime_r((time_t *)&rewindto, &t) != NULL &&
            strftime(timestr, 128, 0, &t) != 0) {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "\tReturning the pool to its state as of %s\n"
                    "\tshould correct the problem. "),
                    timestr);
        } else {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "\tReverting the pool to an earlier state "
                    "should correct the problem.\n\t"));
        }

        if (loss > 120) {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "Approximately %lld minutes of data\n"
                    "\tmust be discarded, irreversibly. "), (loss + 30) / 60);
        } else if (loss > 0) {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "Approximately %lld seconds of data\n"
                    "\tmust be discarded, irreversibly. "), loss);
        }
        if (edata != 0 && edata != UINT64_MAX) {
                if (edata == 1) {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "After rewind, at least\n"
                            "\tone persistent user-data error will remain. "));
                } else {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "After rewind, several\n"
                            "\tpersistent user-data errors will remain. "));
                }
        }
        (void) printf(dgettext(TEXT_DOMAIN,
            "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
            reason >= 0 ? "clear" : "import", name);

        (void) printf(dgettext(TEXT_DOMAIN,
            "A scrub of the pool\n"
            "\tis strongly recommended after recovery.\n"));
        return;

no_info:
        (void) printf(dgettext(TEXT_DOMAIN,
            "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
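 *
 * A hedged example (not in the original source), importing a discovered
 * configuration under an alternate root of "/a":
 *
 *	if (zpool_import(hdl, config, NULL, "/a") != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));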
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
        nvlist_t *props = NULL;
        int ret;

        if (altroot != NULL) {
                if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
                        return (zfs_error_fmt(hdl, EZFS_NOMEM,
                            dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                            newname));
                }

                if (nvlist_add_string(props,
                    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
                    nvlist_add_string(props,
                    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
                        nvlist_free(props);
                        return (zfs_error_fmt(hdl, EZFS_NOMEM,
                            dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                            newname));
                }
        }

        ret = zpool_import_props(hdl, config, newname, props,
            ZFS_IMPORT_NORMAL);
        if (props)
                nvlist_free(props);
        return (ret);
}

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
        nvlist_t **child;
        uint_t c, children;
        char *vname;
        uint64_t is_log = 0;

        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
            &is_log);

        if (name != NULL)
                (void) printf("\t%*s%s%s\n", indent, "", name,
                    is_log ? " [log]" : "");

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0)
                return;

        for (c = 0; c < children; c++) {
                vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
                print_vdev_tree(hdl, vname, child[c], indent + 2);
                free(vname);
        }
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
        nvlist_t *nvinfo, *unsup_feat;

        verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
            0);
        verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
            &unsup_feat) == 0);

        for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
            nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
                char *desc;

                verify(nvpair_type(nvp) == DATA_TYPE_STRING);
                verify(nvpair_value_string(nvp, &desc) == 0);

                if (strlen(desc) > 0)
                        (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
                else
                        (void) printf("\t%s\n", nvpair_name(nvp));
        }
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
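 *
 * A minimal sketch (not part of the original source) of importing with an
 * explicit property list; 'props' would carry entries such as
 * "readonly"="on" built with nvlist_add_string():
 *
 *	if (zpool_import_props(hdl, config, NULL, props,
 *	    ZFS_IMPORT_NORMAL) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));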
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
        zfs_cmd_t zc = { 0 };
        zpool_rewind_policy_t policy;
        nvlist_t *nv = NULL;
        nvlist_t *nvinfo = NULL;
        nvlist_t *missing = NULL;
        char *thename;
        char *origname;
        int ret;
        int error = 0;
        char errbuf[1024];

        verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
            &origname) == 0);

        (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
            "cannot import pool '%s'"), origname);

        if (newname != NULL) {
                if (!zpool_name_valid(hdl, B_FALSE, newname))
                        return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
                            dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                            newname));
                thename = (char *)newname;
        } else {
                thename = origname;
        }

        if (props) {
                uint64_t version;
                prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
                    &version) == 0);

                if ((props = zpool_valid_proplist(hdl, origname,
                    props, version, flags, errbuf)) == NULL) {
                        return (-1);
                } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
                        nvlist_free(props);
                        return (-1);
                }
        }

        (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
            &zc.zc_guid) == 0);

        if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
                nvlist_free(props);
                return (-1);
        }
        if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
                nvlist_free(props);
                return (-1);
        }

        zc.zc_cookie = flags;
        while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
            errno == ENOMEM) {
                if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                        zcmd_free_nvlists(&zc);
                        return (-1);
                }
        }
        if (ret != 0)
                error = errno;

        (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
        zpool_get_rewind_policy(config, &policy);

        if (error) {
                char desc[1024];

                /*
                 * Dry-run failed, but we print out what success
                 * looks like if we found a best txg
                 */
                if (policy.zrp_request & ZPOOL_TRY_REWIND) {
                        zpool_rewind_exclaim(hdl, newname ? origname : thename,
                            B_TRUE, nv);
                        nvlist_free(nv);
                        return (-1);
                }

                if (newname == NULL)
                        (void) snprintf(desc, sizeof (desc),
                            dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                            thename);
                else
                        (void) snprintf(desc, sizeof (desc),
                            dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
                            origname, thename);

                switch (error) {
                case ENOTSUP:
                        if (nv != NULL && nvlist_lookup_nvlist(nv,
                            ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
                            nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
                                (void) printf(dgettext(TEXT_DOMAIN, "This "
                                    "pool uses the following feature(s) not "
                                    "supported by this system:\n"));
                                zpool_print_unsup_feat(nv);
                                if (nvlist_exists(nvinfo,
                                    ZPOOL_CONFIG_CAN_RDONLY)) {
                                        (void) printf(dgettext(TEXT_DOMAIN,
                                            "All unsupported features are only "
                                            "required for writing to the pool."
                                            "\nThe pool can be imported using "
                                            "'-o readonly=on'.\n"));
                                }
                        }
                        /*
                         * Unsupported version.
                         */
                        (void) zfs_error(hdl, EZFS_BADVERSION, desc);
                        break;

                case EINVAL:
                        (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
                        break;

                case EROFS:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "one or more devices is read only"));
                        (void) zfs_error(hdl, EZFS_BADDEV, desc);
                        break;

                case ENXIO:
                        if (nv && nvlist_lookup_nvlist(nv,
                            ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
                            nvlist_lookup_nvlist(nvinfo,
                            ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
                                (void) printf(dgettext(TEXT_DOMAIN,
                                    "The devices below are missing, use "
                                    "'-m' to import the pool anyway:\n"));
                                print_vdev_tree(hdl, NULL, missing, 2);
                                (void) printf("\n");
                        }
                        (void) zpool_standard_error(hdl, error, desc);
                        break;

                case EEXIST:
                        (void) zpool_standard_error(hdl, error, desc);
                        break;

                default:
                        (void) zpool_standard_error(hdl, error, desc);
                        zpool_explain_recover(hdl,
                            newname ? origname : thename, -error, nv);
                        break;
                }

                nvlist_free(nv);
                ret = -1;
        } else {
                zpool_handle_t *zhp;

                /*
                 * This should never fail, but play it safe anyway.
                 */
                if (zpool_open_silent(hdl, thename, &zhp) != 0)
                        ret = -1;
                else if (zhp != NULL)
                        zpool_close(zhp);
                if (policy.zrp_request &
                    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
                        zpool_rewind_exclaim(hdl, newname ? origname : thename,
                            ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
                }
                nvlist_free(nv);
                return (0);
        }

        zcmd_free_nvlists(&zc);
        nvlist_free(props);

        return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
        zfs_cmd_t zc = { 0 };
        char msg[1024];
        libzfs_handle_t *hdl = zhp->zpool_hdl;

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        zc.zc_cookie = func;

        if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
            (errno == ENOENT && func != POOL_SCAN_NONE))
                return (0);

        if (func == POOL_SCAN_SCRUB) {
                (void) snprintf(msg, sizeof (msg),
                    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
        } else if (func == POOL_SCAN_NONE) {
                (void) snprintf(msg, sizeof (msg),
                    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
                    zc.zc_name);
        } else {
                assert(!"unexpected result");
        }

        if (errno == EBUSY) {
                nvlist_t *nvroot;
                pool_scan_stat_t *ps = NULL;
                uint_t psc;

                verify(nvlist_lookup_nvlist(zhp->zpool_config,
                    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
                (void) nvlist_lookup_uint64_array(nvroot,
                    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
                if (ps && ps->pss_func == POOL_SCAN_SCRUB)
                        return (zfs_error(hdl, EZFS_SCRUBBING, msg));
                else
                        return (zfs_error(hdl, EZFS_RESILVERING, msg));
        } else if (errno == ENOENT) {
                return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
        } else {
                return (zpool_standard_error(hdl, errno, msg));
        }
}

/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string. Users of this are expected to do their own
 * verification of the s# part.
 */
#define CTD_CHECK(str)  (str && str[0] == 'c' && isdigit(str[1]))

/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 * and the like.
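 *
 * For example (illustrative, not in the original source), both
 * "/dev/dsk/c0t0d0s0" and "/dev/dsk/c0t0d0s0/old" pass this check, while
 * "/dev/dsk/emcpower0a" does not.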
 */
static int
ctd_check_path(char *str) {
        /*
         * If it starts with a slash, check the last component.
         */
        if (str && str[0] == '/') {
                char *tmp = strrchr(str, '/');

                /*
                 * If it ends in "/old", check the second-to-last
                 * component of the string instead.
                 */
                if (tmp != str && strcmp(tmp, "/old") == 0) {
                        for (tmp--; *tmp != '/'; tmp--)
                                ;
                }
                str = tmp + 1;
        }
        return (CTD_CHECK(str));
}

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
        uint_t c, children;
        nvlist_t **child;
        nvlist_t *ret;
        uint64_t is_log;
        char *srchkey;
        nvpair_t *pair = nvlist_next_nvpair(search, NULL);

        /* Nothing to look for */
        if (search == NULL || pair == NULL)
                return (NULL);

        /* Obtain the key we will use to search */
        srchkey = nvpair_name(pair);

        switch (nvpair_type(pair)) {
        case DATA_TYPE_UINT64:
                if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
                        uint64_t srchval, theguid;

                        verify(nvpair_value_uint64(pair, &srchval) == 0);
                        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
                            &theguid) == 0);
                        if (theguid == srchval)
                                return (nv);
                }
                break;

        case DATA_TYPE_STRING: {
                char *srchval, *val;

                verify(nvpair_value_string(pair, &srchval) == 0);
                if (nvlist_lookup_string(nv, srchkey, &val) != 0)
                        break;

                /*
                 * Search for the requested value. Special cases:
                 *
                 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
                 *   "s0" or "s0/old". The "s0" part is hidden from the user,
                 *   but included in the string, so this matches around it.
                 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
                 *
                 * Otherwise, all other searches are simple string compares.
                 */
                if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
                    ctd_check_path(val)) {
                        uint64_t wholedisk = 0;

                        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
                            &wholedisk);
                        if (wholedisk) {
                                int slen = strlen(srchval);
                                int vlen = strlen(val);

                                if (slen != vlen - 2)
                                        break;

                                /*
                                 * make_leaf_vdev() should only set
                                 * wholedisk for ZPOOL_CONFIG_PATHs which
                                 * will include "/dev/dsk/", giving plenty of
                                 * room for the indices used next.
                                 */
                                ASSERT(vlen >= 6);

                                /*
                                 * strings identical except trailing "s0"
                                 */
                                if (strcmp(&val[vlen - 2], "s0") == 0 &&
                                    strncmp(srchval, val, slen) == 0)
                                        return (nv);

                                /*
                                 * strings identical except trailing "s0/old"
                                 */
                                if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
                                    strcmp(&srchval[slen - 4], "/old") == 0 &&
                                    strncmp(srchval, val, slen - 4) == 0)
                                        return (nv);

                                break;
                        }
                } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
                        char *type, *idx, *end, *p;
                        uint64_t id, vdev_id;

                        /*
                         * Determine our vdev type, keeping in mind
                         * that the srchval is composed of a type and
                         * vdev id pair (i.e. mirror-4).
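                         *
                         * For example (illustrative), a srchval of "mirror-4"
                         * splits into the type "mirror" and the vdev id 4,
                         * which is compared against this vdev's
                         * ZPOOL_CONFIG_ID below.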
                         */
                        if ((type = strdup(srchval)) == NULL)
                                return (NULL);

                        if ((p = strrchr(type, '-')) == NULL) {
                                free(type);
                                break;
                        }
                        idx = p + 1;
                        *p = '\0';

                        /*
                         * If the types don't match then keep looking.
                         */
                        if (strncmp(val, type, strlen(val)) != 0) {
                                free(type);
                                break;
                        }

                        verify(strncmp(type, VDEV_TYPE_RAIDZ,
                            strlen(VDEV_TYPE_RAIDZ)) == 0 ||
                            strncmp(type, VDEV_TYPE_MIRROR,
                            strlen(VDEV_TYPE_MIRROR)) == 0);
                        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
                            &id) == 0);

                        errno = 0;
                        vdev_id = strtoull(idx, &end, 10);

                        free(type);
                        if (errno != 0)
                                return (NULL);

                        /*
                         * Now verify that we have the correct vdev id.
                         */
                        if (vdev_id == id)
                                return (nv);
                }

                /*
                 * Common case
                 */
                if (strcmp(srchval, val) == 0)
                        return (nv);
                break;
        }

        default:
                break;
        }

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0)
                return (NULL);

        for (c = 0; c < children; c++) {
                if ((ret = vdev_to_nvlist_iter(child[c], search,
                    avail_spare, l2cache, NULL)) != NULL) {
                        /*
                         * The 'is_log' value is only set for the toplevel
                         * vdev, not the leaf vdevs. So we always lookup the
                         * log device from the root of the vdev tree (where
                         * 'log' is non-NULL).
                         */
                        if (log != NULL &&
                            nvlist_lookup_uint64(child[c],
                            ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
                            is_log) {
                                *log = B_TRUE;
                        }
                        return (ret);
                }
        }

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
            &child, &children) == 0) {
                for (c = 0; c < children; c++) {
                        if ((ret = vdev_to_nvlist_iter(child[c], search,
                            avail_spare, l2cache, NULL)) != NULL) {
                                *avail_spare = B_TRUE;
                                return (ret);
                        }
                }
        }

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
            &child, &children) == 0) {
                for (c = 0; c < children; c++) {
                        if ((ret = vdev_to_nvlist_iter(child[c], search,
                            avail_spare, l2cache, NULL)) != NULL) {
                                *l2cache = B_TRUE;
                                return (ret);
                        }
                }
        }

        return (NULL);
}

/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
        nvlist_t *search, *nvroot, *ret;

        verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
        verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

        verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);

        *avail_spare = B_FALSE;
        *l2cache = B_FALSE;
        if (log != NULL)
                *log = B_FALSE;
        ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
        nvlist_free(search);

        return (ret);
}

/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
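 *
 * For example (illustrative), names such as "mirror-0" or "raidz2-1" are
 * considered interior, while a leaf device name like "c0t0d0" is not.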
2146 */ 2147 boolean_t 2148 zpool_vdev_is_interior(const char *name) 2149 { 2150 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2151 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2152 return (B_TRUE); 2153 return (B_FALSE); 2154 } 2155 2156 nvlist_t * 2157 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2158 boolean_t *l2cache, boolean_t *log) 2159 { 2160 char buf[MAXPATHLEN]; 2161 char *end; 2162 nvlist_t *nvroot, *search, *ret; 2163 uint64_t guid; 2164 2165 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2166 2167 guid = strtoull(path, &end, 10); 2168 if (guid != 0 && *end == '\0') { 2169 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2170 } else if (zpool_vdev_is_interior(path)) { 2171 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2172 } else if (path[0] != '/') { 2173 (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path); 2174 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2175 } else { 2176 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2177 } 2178 2179 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2180 &nvroot) == 0); 2181 2182 *avail_spare = B_FALSE; 2183 *l2cache = B_FALSE; 2184 if (log != NULL) 2185 *log = B_FALSE; 2186 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2187 nvlist_free(search); 2188 2189 return (ret); 2190 } 2191 2192 static int 2193 vdev_online(nvlist_t *nv) 2194 { 2195 uint64_t ival; 2196 2197 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2198 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2199 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2200 return (0); 2201 2202 return (1); 2203 } 2204 2205 /* 2206 * Helper function for zpool_get_physpaths(). 2207 */ 2208 static int 2209 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2210 size_t *bytes_written) 2211 { 2212 size_t bytes_left, pos, rsz; 2213 char *tmppath; 2214 const char *format; 2215 2216 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2217 &tmppath) != 0) 2218 return (EZFS_NODEVICE); 2219 2220 pos = *bytes_written; 2221 bytes_left = physpath_size - pos; 2222 format = (pos == 0) ? "%s" : " %s"; 2223 2224 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2225 *bytes_written += rsz; 2226 2227 if (rsz >= bytes_left) { 2228 /* if physpath was not copied properly, clear it */ 2229 if (bytes_left != 0) { 2230 physpath[pos] = 0; 2231 } 2232 return (EZFS_NOSPC); 2233 } 2234 return (0); 2235 } 2236 2237 static int 2238 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2239 size_t *rsz, boolean_t is_spare) 2240 { 2241 char *type; 2242 int ret; 2243 2244 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2245 return (EZFS_INVALCONFIG); 2246 2247 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2248 /* 2249 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2250 * For a spare vdev, we only want to boot from the active 2251 * spare device. 
2252 */ 2253 if (is_spare) { 2254 uint64_t spare = 0; 2255 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2256 &spare); 2257 if (!spare) 2258 return (EZFS_INVALCONFIG); 2259 } 2260 2261 if (vdev_online(nv)) { 2262 if ((ret = vdev_get_one_physpath(nv, physpath, 2263 phypath_size, rsz)) != 0) 2264 return (ret); 2265 } 2266 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2267 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2268 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2269 nvlist_t **child; 2270 uint_t count; 2271 int i, ret; 2272 2273 if (nvlist_lookup_nvlist_array(nv, 2274 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2275 return (EZFS_INVALCONFIG); 2276 2277 for (i = 0; i < count; i++) { 2278 ret = vdev_get_physpaths(child[i], physpath, 2279 phypath_size, rsz, is_spare); 2280 if (ret == EZFS_NOSPC) 2281 return (ret); 2282 } 2283 } 2284 2285 return (EZFS_POOL_INVALARG); 2286 } 2287 2288 /* 2289 * Get phys_path for a root pool config. 2290 * Return 0 on success; non-zero on failure. 2291 */ 2292 static int 2293 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2294 { 2295 size_t rsz; 2296 nvlist_t *vdev_root; 2297 nvlist_t **child; 2298 uint_t count; 2299 char *type; 2300 2301 rsz = 0; 2302 2303 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2304 &vdev_root) != 0) 2305 return (EZFS_INVALCONFIG); 2306 2307 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2308 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2309 &child, &count) != 0) 2310 return (EZFS_INVALCONFIG); 2311 2312 /* 2313 * root pool can not have EFI labeled disks and can only have 2314 * a single top-level vdev. 2315 */ 2316 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 || 2317 pool_uses_efi(vdev_root)) 2318 return (EZFS_POOL_INVALARG); 2319 2320 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2321 B_FALSE); 2322 2323 /* No online devices */ 2324 if (rsz == 0) 2325 return (EZFS_NODEVICE); 2326 2327 return (0); 2328 } 2329 2330 /* 2331 * Get phys_path for a root pool 2332 * Return 0 on success; non-zero on failure. 2333 */ 2334 int 2335 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2336 { 2337 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2338 phypath_size)); 2339 } 2340 2341 /* 2342 * If the device has being dynamically expanded then we need to relabel 2343 * the disk to use the new unallocated space. 2344 */ 2345 static int 2346 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2347 { 2348 char path[MAXPATHLEN]; 2349 char errbuf[1024]; 2350 int fd, error; 2351 int (*_efi_use_whole_disk)(int); 2352 2353 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2354 "efi_use_whole_disk")) == NULL) 2355 return (-1); 2356 2357 (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name); 2358 2359 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2360 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2361 "relabel '%s': unable to open device"), name); 2362 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2363 } 2364 2365 /* 2366 * It's possible that we might encounter an error if the device 2367 * does not have any unallocated space left. If so, we simply 2368 * ignore that error and continue on. 
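 * (efi_use_whole_disk() reports that case as VT_ENOSPC, which is why
 * it is treated as success below.)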
2369 */ 2370 error = _efi_use_whole_disk(fd); 2371 (void) close(fd); 2372 if (error && error != VT_ENOSPC) { 2373 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2374 "relabel '%s': unable to read disk capacity"), name); 2375 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2376 } 2377 return (0); 2378 } 2379 2380 /* 2381 * Bring the specified vdev online. The 'flags' parameter is a set of the 2382 * ZFS_ONLINE_* flags. 2383 */ 2384 int 2385 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2386 vdev_state_t *newstate) 2387 { 2388 zfs_cmd_t zc = { 0 }; 2389 char msg[1024]; 2390 nvlist_t *tgt; 2391 boolean_t avail_spare, l2cache, islog; 2392 libzfs_handle_t *hdl = zhp->zpool_hdl; 2393 2394 if (flags & ZFS_ONLINE_EXPAND) { 2395 (void) snprintf(msg, sizeof (msg), 2396 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2397 } else { 2398 (void) snprintf(msg, sizeof (msg), 2399 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2400 } 2401 2402 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2403 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2404 &islog)) == NULL) 2405 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2406 2407 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2408 2409 if (avail_spare) 2410 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2411 2412 if (flags & ZFS_ONLINE_EXPAND || 2413 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) { 2414 char *pathname = NULL; 2415 uint64_t wholedisk = 0; 2416 2417 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2418 &wholedisk); 2419 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, 2420 &pathname) == 0); 2421 2422 /* 2423 * XXX - L2ARC 1.0 devices can't support expansion. 2424 */ 2425 if (l2cache) { 2426 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2427 "cannot expand cache devices")); 2428 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2429 } 2430 2431 if (wholedisk) { 2432 pathname += strlen(DISK_ROOT) + 1; 2433 (void) zpool_relabel_disk(hdl, pathname); 2434 } 2435 } 2436 2437 zc.zc_cookie = VDEV_STATE_ONLINE; 2438 zc.zc_obj = flags; 2439 2440 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2441 if (errno == EINVAL) { 2442 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2443 "from this pool into a new one. Use '%s' " 2444 "instead"), "zpool detach"); 2445 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2446 } 2447 return (zpool_standard_error(hdl, errno, msg)); 2448 } 2449 2450 *newstate = zc.zc_cookie; 2451 return (0); 2452 } 2453 2454 /* 2455 * Take the specified vdev offline 2456 */ 2457 int 2458 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2459 { 2460 zfs_cmd_t zc = { 0 }; 2461 char msg[1024]; 2462 nvlist_t *tgt; 2463 boolean_t avail_spare, l2cache; 2464 libzfs_handle_t *hdl = zhp->zpool_hdl; 2465 2466 (void) snprintf(msg, sizeof (msg), 2467 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2468 2469 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2470 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2471 NULL)) == NULL) 2472 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2473 2474 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2475 2476 if (avail_spare) 2477 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2478 2479 zc.zc_cookie = VDEV_STATE_OFFLINE; 2480 zc.zc_obj = istmp ? 
ZFS_OFFLINE_TEMPORARY : 0; 2481 2482 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2483 return (0); 2484 2485 switch (errno) { 2486 case EBUSY: 2487 2488 /* 2489 * There are no other replicas of this device. 2490 */ 2491 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2492 2493 case EEXIST: 2494 /* 2495 * The log device has unplayed logs 2496 */ 2497 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2498 2499 default: 2500 return (zpool_standard_error(hdl, errno, msg)); 2501 } 2502 } 2503 2504 /* 2505 * Mark the given vdev faulted. 2506 */ 2507 int 2508 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2509 { 2510 zfs_cmd_t zc = { 0 }; 2511 char msg[1024]; 2512 libzfs_handle_t *hdl = zhp->zpool_hdl; 2513 2514 (void) snprintf(msg, sizeof (msg), 2515 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2516 2517 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2518 zc.zc_guid = guid; 2519 zc.zc_cookie = VDEV_STATE_FAULTED; 2520 zc.zc_obj = aux; 2521 2522 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2523 return (0); 2524 2525 switch (errno) { 2526 case EBUSY: 2527 2528 /* 2529 * There are no other replicas of this device. 2530 */ 2531 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2532 2533 default: 2534 return (zpool_standard_error(hdl, errno, msg)); 2535 } 2536 2537 } 2538 2539 /* 2540 * Mark the given vdev degraded. 2541 */ 2542 int 2543 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2544 { 2545 zfs_cmd_t zc = { 0 }; 2546 char msg[1024]; 2547 libzfs_handle_t *hdl = zhp->zpool_hdl; 2548 2549 (void) snprintf(msg, sizeof (msg), 2550 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2551 2552 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2553 zc.zc_guid = guid; 2554 zc.zc_cookie = VDEV_STATE_DEGRADED; 2555 zc.zc_obj = aux; 2556 2557 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2558 return (0); 2559 2560 return (zpool_standard_error(hdl, errno, msg)); 2561 } 2562 2563 /* 2564 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2565 * a hot spare. 2566 */ 2567 static boolean_t 2568 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2569 { 2570 nvlist_t **child; 2571 uint_t c, children; 2572 char *type; 2573 2574 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2575 &children) == 0) { 2576 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2577 &type) == 0); 2578 2579 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2580 children == 2 && child[which] == tgt) 2581 return (B_TRUE); 2582 2583 for (c = 0; c < children; c++) 2584 if (is_replacing_spare(child[c], tgt, which)) 2585 return (B_TRUE); 2586 } 2587 2588 return (B_FALSE); 2589 } 2590 2591 /* 2592 * Attach new_disk (fully described by nvroot) to old_disk. 2593 * If 'replacing' is specified, the new disk will replace the old one. 
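 * 'nvroot' must contain exactly one leaf vdev; attaching without
 * 'replacing' turns old_disk into a mirror of the two devices.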
2594 */ 2595 int 2596 zpool_vdev_attach(zpool_handle_t *zhp, 2597 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2598 { 2599 zfs_cmd_t zc = { 0 }; 2600 char msg[1024]; 2601 int ret; 2602 nvlist_t *tgt; 2603 boolean_t avail_spare, l2cache, islog; 2604 uint64_t val; 2605 char *newname; 2606 nvlist_t **child; 2607 uint_t children; 2608 nvlist_t *config_root; 2609 libzfs_handle_t *hdl = zhp->zpool_hdl; 2610 boolean_t rootpool = zpool_is_bootable(zhp); 2611 2612 if (replacing) 2613 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2614 "cannot replace %s with %s"), old_disk, new_disk); 2615 else 2616 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2617 "cannot attach %s to %s"), new_disk, old_disk); 2618 2619 /* 2620 * If this is a root pool, make sure that we're not attaching an 2621 * EFI labeled device. 2622 */ 2623 if (rootpool && pool_uses_efi(nvroot)) { 2624 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2625 "EFI labeled devices are not supported on root pools.")); 2626 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 2627 } 2628 2629 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2630 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2631 &islog)) == 0) 2632 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2633 2634 if (avail_spare) 2635 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2636 2637 if (l2cache) 2638 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2639 2640 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2641 zc.zc_cookie = replacing; 2642 2643 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2644 &child, &children) != 0 || children != 1) { 2645 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2646 "new device must be a single disk")); 2647 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2648 } 2649 2650 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2651 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2652 2653 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2654 return (-1); 2655 2656 /* 2657 * If the target is a hot spare that has been swapped in, we can only 2658 * replace it with another hot spare. 2659 */ 2660 if (replacing && 2661 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2662 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2663 NULL) == NULL || !avail_spare) && 2664 is_replacing_spare(config_root, tgt, 1)) { 2665 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2666 "can only be replaced by another hot spare")); 2667 free(newname); 2668 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2669 } 2670 2671 free(newname); 2672 2673 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2674 return (-1); 2675 2676 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2677 2678 zcmd_free_nvlists(&zc); 2679 2680 if (ret == 0) { 2681 if (rootpool) { 2682 /* 2683 * XXX need a better way to prevent user from 2684 * booting up a half-baked vdev. 2685 */ 2686 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2687 "sure to wait until resilver is done " 2688 "before rebooting.\n")); 2689 } 2690 return (0); 2691 } 2692 2693 switch (errno) { 2694 case ENOTSUP: 2695 /* 2696 * Can't attach to or replace this type of vdev. 
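 * The detail message depends on whether this was a replace, whether
 * the target is a log device, and whether the pool supports
 * SPA_VERSION_MULTI_REPLACE.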
2697 */ 2698 if (replacing) { 2699 uint64_t version = zpool_get_prop_int(zhp, 2700 ZPOOL_PROP_VERSION, NULL); 2701 2702 if (islog) 2703 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2704 "cannot replace a log with a spare")); 2705 else if (version >= SPA_VERSION_MULTI_REPLACE) 2706 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2707 "already in replacing/spare config; wait " 2708 "for completion or use 'zpool detach'")); 2709 else 2710 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2711 "cannot replace a replacing device")); 2712 } else { 2713 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2714 "can only attach to mirrors and top-level " 2715 "disks")); 2716 } 2717 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2718 break; 2719 2720 case EINVAL: 2721 /* 2722 * The new device must be a single disk. 2723 */ 2724 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2725 "new device must be a single disk")); 2726 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2727 break; 2728 2729 case EBUSY: 2730 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2731 new_disk); 2732 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2733 break; 2734 2735 case EOVERFLOW: 2736 /* 2737 * The new device is too small. 2738 */ 2739 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2740 "device is too small")); 2741 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2742 break; 2743 2744 case EDOM: 2745 /* 2746 * The new device has a different alignment requirement. 2747 */ 2748 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2749 "devices have different sector alignment")); 2750 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2751 break; 2752 2753 case ENAMETOOLONG: 2754 /* 2755 * The resulting top-level vdev spec won't fit in the label. 2756 */ 2757 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2758 break; 2759 2760 default: 2761 (void) zpool_standard_error(hdl, errno, msg); 2762 } 2763 2764 return (-1); 2765 } 2766 2767 /* 2768 * Detach the specified device. 2769 */ 2770 int 2771 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2772 { 2773 zfs_cmd_t zc = { 0 }; 2774 char msg[1024]; 2775 nvlist_t *tgt; 2776 boolean_t avail_spare, l2cache; 2777 libzfs_handle_t *hdl = zhp->zpool_hdl; 2778 2779 (void) snprintf(msg, sizeof (msg), 2780 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2781 2782 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2783 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2784 NULL)) == 0) 2785 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2786 2787 if (avail_spare) 2788 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2789 2790 if (l2cache) 2791 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2792 2793 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2794 2795 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2796 return (0); 2797 2798 switch (errno) { 2799 2800 case ENOTSUP: 2801 /* 2802 * Can't detach from this type of vdev. 2803 */ 2804 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2805 "applicable to mirror and replacing vdevs")); 2806 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2807 break; 2808 2809 case EBUSY: 2810 /* 2811 * There are no other replicas of this device. 2812 */ 2813 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2814 break; 2815 2816 default: 2817 (void) zpool_standard_error(hdl, errno, msg); 2818 } 2819 2820 return (-1); 2821 } 2822 2823 /* 2824 * Find a mirror vdev in the source nvlist. 2825 * 2826 * The mchild array contains a list of disks in one of the top-level mirrors 2827 * of the source pool. The schild array contains a list of disks that the 2828 * user specified on the command line. 
We loop over the mchild array to 2829 * see if any entry in the schild array matches. 2830 * 2831 * If a disk in the mchild array is found in the schild array, we return 2832 * the index of that entry. Otherwise we return -1. 2833 */ 2834 static int 2835 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2836 nvlist_t **schild, uint_t schildren) 2837 { 2838 uint_t mc; 2839 2840 for (mc = 0; mc < mchildren; mc++) { 2841 uint_t sc; 2842 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2843 mchild[mc], B_FALSE); 2844 2845 for (sc = 0; sc < schildren; sc++) { 2846 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2847 schild[sc], B_FALSE); 2848 boolean_t result = (strcmp(mpath, spath) == 0); 2849 2850 free(spath); 2851 if (result) { 2852 free(mpath); 2853 return (mc); 2854 } 2855 } 2856 2857 free(mpath); 2858 } 2859 2860 return (-1); 2861 } 2862 2863 /* 2864 * Split a mirror pool. If newroot points to null, then a new nvlist 2865 * is generated and it is the responsibility of the caller to free it. 2866 */ 2867 int 2868 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2869 nvlist_t *props, splitflags_t flags) 2870 { 2871 zfs_cmd_t zc = { 0 }; 2872 char msg[1024]; 2873 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2874 nvlist_t **varray = NULL, *zc_props = NULL; 2875 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2876 libzfs_handle_t *hdl = zhp->zpool_hdl; 2877 uint64_t vers; 2878 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2879 int retval = 0; 2880 2881 (void) snprintf(msg, sizeof (msg), 2882 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2883 2884 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2885 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2886 2887 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2888 (void) fprintf(stderr, gettext("Internal error: unable to " 2889 "retrieve pool configuration\n")); 2890 return (-1); 2891 } 2892 2893 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2894 == 0); 2895 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2896 2897 if (props) { 2898 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2899 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2900 props, vers, flags, msg)) == NULL) 2901 return (-1); 2902 } 2903 2904 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2905 &children) != 0) { 2906 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2907 "Source pool is missing vdev tree")); 2908 if (zc_props) 2909 nvlist_free(zc_props); 2910 return (-1); 2911 } 2912 2913 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2914 vcount = 0; 2915 2916 if (*newroot == NULL || 2917 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2918 &newchild, &newchildren) != 0) 2919 newchildren = 0; 2920 2921 for (c = 0; c < children; c++) { 2922 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2923 char *type; 2924 nvlist_t **mchild, *vdev; 2925 uint_t mchildren; 2926 int entry; 2927 2928 /* 2929 * Unlike cache & spares, slogs are stored in the 2930 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2931 */ 2932 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2933 &is_log); 2934 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2935 &is_hole); 2936 if (is_log || is_hole) { 2937 /* 2938 * Create a hole vdev and put it in the config. 
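 * (Any trailing run of hole/log entries is trimmed via 'lastlog'
 * when the children are added to the new config below.)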
2939 */ 2940 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2941 goto out; 2942 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2943 VDEV_TYPE_HOLE) != 0) 2944 goto out; 2945 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2946 1) != 0) 2947 goto out; 2948 if (lastlog == 0) 2949 lastlog = vcount; 2950 varray[vcount++] = vdev; 2951 continue; 2952 } 2953 lastlog = 0; 2954 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2955 == 0); 2956 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2957 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2958 "Source pool must be composed only of mirrors\n")); 2959 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2960 goto out; 2961 } 2962 2963 verify(nvlist_lookup_nvlist_array(child[c], 2964 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2965 2966 /* find or add an entry for this top-level vdev */ 2967 if (newchildren > 0 && 2968 (entry = find_vdev_entry(zhp, mchild, mchildren, 2969 newchild, newchildren)) >= 0) { 2970 /* We found a disk that the user specified. */ 2971 vdev = mchild[entry]; 2972 ++found; 2973 } else { 2974 /* User didn't specify a disk for this vdev. */ 2975 vdev = mchild[mchildren - 1]; 2976 } 2977 2978 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 2979 goto out; 2980 } 2981 2982 /* did we find every disk the user specified? */ 2983 if (found != newchildren) { 2984 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 2985 "include at most one disk from each mirror")); 2986 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2987 goto out; 2988 } 2989 2990 /* Prepare the nvlist for populating. */ 2991 if (*newroot == NULL) { 2992 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 2993 goto out; 2994 freelist = B_TRUE; 2995 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 2996 VDEV_TYPE_ROOT) != 0) 2997 goto out; 2998 } else { 2999 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 3000 } 3001 3002 /* Add all the children we found */ 3003 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 3004 lastlog == 0 ? vcount : lastlog) != 0) 3005 goto out; 3006 3007 /* 3008 * If we're just doing a dry run, exit now with success. 3009 */ 3010 if (flags.dryrun) { 3011 memory_err = B_FALSE; 3012 freelist = B_FALSE; 3013 goto out; 3014 } 3015 3016 /* now build up the config list & call the ioctl */ 3017 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3018 goto out; 3019 3020 if (nvlist_add_nvlist(newconfig, 3021 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3022 nvlist_add_string(newconfig, 3023 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3024 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3025 goto out; 3026 3027 /* 3028 * The new pool is automatically part of the namespace unless we 3029 * explicitly export it. 
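 * (When the caller did not request an immediate import, we ask for
 * that export by setting ZPOOL_EXPORT_AFTER_SPLIT below.)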
3030 */ 3031 if (!flags.import) 3032 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3033 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3034 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3035 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3036 goto out; 3037 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3038 goto out; 3039 3040 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3041 retval = zpool_standard_error(hdl, errno, msg); 3042 goto out; 3043 } 3044 3045 freelist = B_FALSE; 3046 memory_err = B_FALSE; 3047 3048 out: 3049 if (varray != NULL) { 3050 int v; 3051 3052 for (v = 0; v < vcount; v++) 3053 nvlist_free(varray[v]); 3054 free(varray); 3055 } 3056 zcmd_free_nvlists(&zc); 3057 if (zc_props) 3058 nvlist_free(zc_props); 3059 if (newconfig) 3060 nvlist_free(newconfig); 3061 if (freelist) { 3062 nvlist_free(*newroot); 3063 *newroot = NULL; 3064 } 3065 3066 if (retval != 0) 3067 return (retval); 3068 3069 if (memory_err) 3070 return (no_memory(hdl)); 3071 3072 return (0); 3073 } 3074 3075 /* 3076 * Remove the given device. Currently, this is supported only for hot spares 3077 * and level 2 cache devices. 3078 */ 3079 int 3080 zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3081 { 3082 zfs_cmd_t zc = { 0 }; 3083 char msg[1024]; 3084 nvlist_t *tgt; 3085 boolean_t avail_spare, l2cache, islog; 3086 libzfs_handle_t *hdl = zhp->zpool_hdl; 3087 uint64_t version; 3088 3089 (void) snprintf(msg, sizeof (msg), 3090 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3091 3092 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3093 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3094 &islog)) == 0) 3095 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3096 /* 3097 * XXX - this should just go away. 3098 */ 3099 if (!avail_spare && !l2cache && !islog) { 3100 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3101 "only inactive hot spares, cache, top-level, " 3102 "or log devices can be removed")); 3103 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3104 } 3105 3106 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3107 if (islog && version < SPA_VERSION_HOLES) { 3108 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3109 "pool must be upgraded to support log removal")); 3110 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3111 } 3112 3113 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3114 3115 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3116 return (0); 3117 3118 return (zpool_standard_error(hdl, errno, msg)); 3119 } 3120 3121 /* 3122 * Clear the errors for the pool, or the particular device if specified. 3123 */ 3124 int 3125 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3126 { 3127 zfs_cmd_t zc = { 0 }; 3128 char msg[1024]; 3129 nvlist_t *tgt; 3130 zpool_rewind_policy_t policy; 3131 boolean_t avail_spare, l2cache; 3132 libzfs_handle_t *hdl = zhp->zpool_hdl; 3133 nvlist_t *nvi = NULL; 3134 int error; 3135 3136 if (path) 3137 (void) snprintf(msg, sizeof (msg), 3138 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3139 path); 3140 else 3141 (void) snprintf(msg, sizeof (msg), 3142 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3143 zhp->zpool_name); 3144 3145 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3146 if (path) { 3147 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3148 &l2cache, NULL)) == 0) 3149 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3150 3151 /* 3152 * Don't allow error clearing for hot spares.
Do allow 3153 * error clearing for l2cache devices. 3154 */ 3155 if (avail_spare) 3156 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3157 3158 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3159 &zc.zc_guid) == 0); 3160 } 3161 3162 zpool_get_rewind_policy(rewindnvl, &policy); 3163 zc.zc_cookie = policy.zrp_request; 3164 3165 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3166 return (-1); 3167 3168 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 3169 return (-1); 3170 3171 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3172 errno == ENOMEM) { 3173 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3174 zcmd_free_nvlists(&zc); 3175 return (-1); 3176 } 3177 } 3178 3179 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) && 3180 errno != EPERM && errno != EACCES)) { 3181 if (policy.zrp_request & 3182 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3183 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3184 zpool_rewind_exclaim(hdl, zc.zc_name, 3185 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), 3186 nvi); 3187 nvlist_free(nvi); 3188 } 3189 zcmd_free_nvlists(&zc); 3190 return (0); 3191 } 3192 3193 zcmd_free_nvlists(&zc); 3194 return (zpool_standard_error(hdl, errno, msg)); 3195 } 3196 3197 /* 3198 * Similar to zpool_clear(), but takes a GUID (used by fmd). 3199 */ 3200 int 3201 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3202 { 3203 zfs_cmd_t zc = { 0 }; 3204 char msg[1024]; 3205 libzfs_handle_t *hdl = zhp->zpool_hdl; 3206 3207 (void) snprintf(msg, sizeof (msg), 3208 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3209 guid); 3210 3211 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3212 zc.zc_guid = guid; 3213 zc.zc_cookie = ZPOOL_NO_REWIND; 3214 3215 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3216 return (0); 3217 3218 return (zpool_standard_error(hdl, errno, msg)); 3219 } 3220 3221 /* 3222 * Change the GUID for a pool. 3223 */ 3224 int 3225 zpool_reguid(zpool_handle_t *zhp) 3226 { 3227 char msg[1024]; 3228 libzfs_handle_t *hdl = zhp->zpool_hdl; 3229 zfs_cmd_t zc = { 0 }; 3230 3231 (void) snprintf(msg, sizeof (msg), 3232 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3233 3234 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3235 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3236 return (0); 3237 3238 return (zpool_standard_error(hdl, errno, msg)); 3239 } 3240 3241 /* 3242 * Reopen the pool. 3243 */ 3244 int 3245 zpool_reopen(zpool_handle_t *zhp) 3246 { 3247 zfs_cmd_t zc = { 0 }; 3248 char msg[1024]; 3249 libzfs_handle_t *hdl = zhp->zpool_hdl; 3250 3251 (void) snprintf(msg, sizeof (msg), 3252 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3253 zhp->zpool_name); 3254 3255 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3256 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3257 return (0); 3258 return (zpool_standard_error(hdl, errno, msg)); 3259 } 3260 3261 /* 3262 * Convert from a devid string to a path. 
3263 */ 3264 static char * 3265 devid_to_path(char *devid_str) 3266 { 3267 ddi_devid_t devid; 3268 char *minor; 3269 char *path; 3270 devid_nmlist_t *list = NULL; 3271 int ret; 3272 3273 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3274 return (NULL); 3275 3276 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3277 3278 devid_str_free(minor); 3279 devid_free(devid); 3280 3281 if (ret != 0) 3282 return (NULL); 3283 3284 if ((path = strdup(list[0].devname)) == NULL) 3285 return (NULL); 3286 3287 devid_free_nmlist(list); 3288 3289 return (path); 3290 } 3291 3292 /* 3293 * Convert from a path to a devid string. 3294 */ 3295 static char * 3296 path_to_devid(const char *path) 3297 { 3298 int fd; 3299 ddi_devid_t devid; 3300 char *minor, *ret; 3301 3302 if ((fd = open(path, O_RDONLY)) < 0) 3303 return (NULL); 3304 3305 minor = NULL; 3306 ret = NULL; 3307 if (devid_get(fd, &devid) == 0) { 3308 if (devid_get_minor_name(fd, &minor) == 0) 3309 ret = devid_str_encode(devid, minor); 3310 if (minor != NULL) 3311 devid_str_free(minor); 3312 devid_free(devid); 3313 } 3314 (void) close(fd); 3315 3316 return (ret); 3317 } 3318 3319 /* 3320 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3321 * ignore any failure here, since a common case is for an unprivileged user to 3322 * type 'zpool status', and we'll display the correct information anyway. 3323 */ 3324 static void 3325 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3326 { 3327 zfs_cmd_t zc = { 0 }; 3328 3329 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3330 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3331 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3332 &zc.zc_guid) == 0); 3333 3334 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3335 } 3336 3337 /* 3338 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3339 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3340 * We also check if this is a whole disk, in which case we strip off the 3341 * trailing 's0' slice name. 3342 * 3343 * This routine is also responsible for identifying when disks have been 3344 * reconfigured in a new location. The kernel will have opened the device by 3345 * devid, but the path will still refer to the old location. To catch this, we 3346 * first do a path -> devid translation (which is fast for the common case). If 3347 * the devid matches, we're done. If not, we do a reverse devid -> path 3348 * translation and issue the appropriate ioctl() to update the path of the vdev. 3349 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3350 * of these checks. 3351 */ 3352 char * 3353 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3354 boolean_t verbose) 3355 { 3356 char *path, *devid; 3357 uint64_t value; 3358 char buf[64]; 3359 vdev_stat_t *vs; 3360 uint_t vsc; 3361 3362 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 3363 &value) == 0) { 3364 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3365 &value) == 0); 3366 (void) snprintf(buf, sizeof (buf), "%llu", 3367 (u_longlong_t)value); 3368 path = buf; 3369 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 3370 3371 /* 3372 * If the device is dead (faulted, offline, etc) then don't 3373 * bother opening it. Otherwise we may be forcing the user to 3374 * open a misbehaving device, which can have undesirable 3375 * effects. 
3376 */ 3377 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3378 (uint64_t **)&vs, &vsc) != 0 || 3379 vs->vs_state >= VDEV_STATE_DEGRADED) && 3380 zhp != NULL && 3381 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3382 /* 3383 * Determine if the current path is correct. 3384 */ 3385 char *newdevid = path_to_devid(path); 3386 3387 if (newdevid == NULL || 3388 strcmp(devid, newdevid) != 0) { 3389 char *newpath; 3390 3391 if ((newpath = devid_to_path(devid)) != NULL) { 3392 /* 3393 * Update the path appropriately. 3394 */ 3395 set_path(zhp, nv, newpath); 3396 if (nvlist_add_string(nv, 3397 ZPOOL_CONFIG_PATH, newpath) == 0) 3398 verify(nvlist_lookup_string(nv, 3399 ZPOOL_CONFIG_PATH, 3400 &path) == 0); 3401 free(newpath); 3402 } 3403 } 3404 3405 if (newdevid) 3406 devid_str_free(newdevid); 3407 } 3408 3409 if (strncmp(path, "/dev/dsk/", 9) == 0) 3410 path += 9; 3411 3412 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3413 &value) == 0 && value) { 3414 int pathlen = strlen(path); 3415 char *tmp = zfs_strdup(hdl, path); 3416 3417 /* 3418 * If it starts with c#, and ends with "s0", chop 3419 * the "s0" off, or if it ends with "s0/old", remove 3420 * the "s0" from the middle. 3421 */ 3422 if (CTD_CHECK(tmp)) { 3423 if (strcmp(&tmp[pathlen - 2], "s0") == 0) { 3424 tmp[pathlen - 2] = '\0'; 3425 } else if (pathlen > 6 && 3426 strcmp(&tmp[pathlen - 6], "s0/old") == 0) { 3427 (void) strcpy(&tmp[pathlen - 6], 3428 "/old"); 3429 } 3430 } 3431 return (tmp); 3432 } 3433 } else { 3434 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3435 3436 /* 3437 * If it's a raidz device, we need to stick in the parity level. 3438 */ 3439 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3440 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3441 &value) == 0); 3442 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3443 (u_longlong_t)value); 3444 path = buf; 3445 } 3446 3447 /* 3448 * We identify each top-level vdev by using a <type-id> 3449 * naming convention. 3450 */ 3451 if (verbose) { 3452 uint64_t id; 3453 3454 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3455 &id) == 0); 3456 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3457 (u_longlong_t)id); 3458 path = buf; 3459 } 3460 } 3461 3462 return (zfs_strdup(hdl, path)); 3463 } 3464 3465 static int 3466 zbookmark_compare(const void *a, const void *b) 3467 { 3468 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 3469 } 3470 3471 /* 3472 * Retrieve the persistent error log, uniquify the members, and return to the 3473 * caller. 3474 */ 3475 int 3476 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3477 { 3478 zfs_cmd_t zc = { 0 }; 3479 uint64_t count; 3480 zbookmark_phys_t *zb = NULL; 3481 int i; 3482 3483 /* 3484 * Retrieve the raw error list from the kernel. If the number of errors 3485 * has increased, allocate more space and continue until we get the 3486 * entire list. 
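 * ZPOOL_CONFIG_ERRCOUNT only provides an initial estimate; on ENOMEM
 * the buffer is regrown to zc_nvlist_dst_size entries and the ioctl
 * retried.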
3487 */ 3488 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3489 &count) == 0); 3490 if (count == 0) 3491 return (0); 3492 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3493 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL) 3494 return (-1); 3495 zc.zc_nvlist_dst_size = count; 3496 (void) strcpy(zc.zc_name, zhp->zpool_name); 3497 for (;;) { 3498 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3499 &zc) != 0) { 3500 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3501 if (errno == ENOMEM) { 3502 void *dst; 3503 3504 count = zc.zc_nvlist_dst_size; 3505 dst = zfs_alloc(zhp->zpool_hdl, count * 3506 sizeof (zbookmark_phys_t)); 3507 if (dst == NULL) 3508 return (-1); 3509 zc.zc_nvlist_dst = (uintptr_t)dst; 3510 } else { 3511 return (-1); 3512 } 3513 } else { 3514 break; 3515 } 3516 } 3517 3518 /* 3519 * Sort the resulting bookmarks. This is a little confusing due to the 3520 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last 3521 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks 3522 * _not_ copied as part of the process. So we point the start of our 3523 * array appropriately and decrement the total number of elements. 3524 */ 3525 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) + 3526 zc.zc_nvlist_dst_size; 3527 count -= zc.zc_nvlist_dst_size; 3528 3529 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_compare); 3530 3531 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 3532 3533 /* 3534 * Fill in the nverrlistp with nvlists of dataset and object numbers. 3535 */ 3536 for (i = 0; i < count; i++) { 3537 nvlist_t *nv; 3538 3539 /* ignoring zb_blkid and zb_level for now */ 3540 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3541 zb[i-1].zb_object == zb[i].zb_object) 3542 continue; 3543 3544 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3545 goto nomem; 3546 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3547 zb[i].zb_objset) != 0) { 3548 nvlist_free(nv); 3549 goto nomem; 3550 } 3551 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3552 zb[i].zb_object) != 0) { 3553 nvlist_free(nv); 3554 goto nomem; 3555 } 3556 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3557 nvlist_free(nv); 3558 goto nomem; 3559 } 3560 nvlist_free(nv); 3561 } 3562 3563 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3564 return (0); 3565 3566 nomem: 3567 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3568 return (no_memory(zhp->zpool_hdl)); 3569 } 3570 3571 /* 3572 * Upgrade a ZFS pool to the latest on-disk version.
3573 */ 3574 int 3575 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3576 { 3577 zfs_cmd_t zc = { 0 }; 3578 libzfs_handle_t *hdl = zhp->zpool_hdl; 3579 3580 (void) strcpy(zc.zc_name, zhp->zpool_name); 3581 zc.zc_cookie = new_version; 3582 3583 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3584 return (zpool_standard_error_fmt(hdl, errno, 3585 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3586 zhp->zpool_name)); 3587 return (0); 3588 } 3589 3590 void 3591 zfs_save_arguments(int argc, char **argv, char *string, int len) 3592 { 3593 (void) strlcpy(string, basename(argv[0]), len); 3594 for (int i = 1; i < argc; i++) { 3595 (void) strlcat(string, " ", len); 3596 (void) strlcat(string, argv[i], len); 3597 } 3598 } 3599 3600 int 3601 zpool_log_history(libzfs_handle_t *hdl, const char *message) 3602 { 3603 zfs_cmd_t zc = { 0 }; 3604 nvlist_t *args; 3605 int err; 3606 3607 args = fnvlist_alloc(); 3608 fnvlist_add_string(args, "message", message); 3609 err = zcmd_write_src_nvlist(hdl, &zc, args); 3610 if (err == 0) 3611 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc); 3612 nvlist_free(args); 3613 zcmd_free_nvlists(&zc); 3614 return (err); 3615 } 3616 3617 /* 3618 * Perform ioctl to get some command history of a pool. 3619 * 3620 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 3621 * logical offset of the history buffer to start reading from. 3622 * 3623 * Upon return, 'off' is the next logical offset to read from and 3624 * 'len' is the actual amount of bytes read into 'buf'. 3625 */ 3626 static int 3627 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3628 { 3629 zfs_cmd_t zc = { 0 }; 3630 libzfs_handle_t *hdl = zhp->zpool_hdl; 3631 3632 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3633 3634 zc.zc_history = (uint64_t)(uintptr_t)buf; 3635 zc.zc_history_len = *len; 3636 zc.zc_history_offset = *off; 3637 3638 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3639 switch (errno) { 3640 case EPERM: 3641 return (zfs_error_fmt(hdl, EZFS_PERM, 3642 dgettext(TEXT_DOMAIN, 3643 "cannot show history for pool '%s'"), 3644 zhp->zpool_name)); 3645 case ENOENT: 3646 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3647 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3648 "'%s'"), zhp->zpool_name)); 3649 case ENOTSUP: 3650 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3651 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3652 "'%s', pool must be upgraded"), zhp->zpool_name)); 3653 default: 3654 return (zpool_standard_error_fmt(hdl, errno, 3655 dgettext(TEXT_DOMAIN, 3656 "cannot get history for '%s'"), zhp->zpool_name)); 3657 } 3658 } 3659 3660 *len = zc.zc_history_len; 3661 *off = zc.zc_history_offset; 3662 3663 return (0); 3664 } 3665 3666 /* 3667 * Process the buffer of nvlists, unpacking and storing each nvlist record 3668 * into 'records'. 'leftover' is set to the number of bytes that weren't 3669 * processed as there wasn't a complete record. 
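 * Each record is a packed nvlist preceded by its length, stored as a
 * little-endian uint64_t.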
3670 */ 3671 int 3672 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3673 nvlist_t ***records, uint_t *numrecords) 3674 { 3675 uint64_t reclen; 3676 nvlist_t *nv; 3677 int i; 3678 3679 while (bytes_read > sizeof (reclen)) { 3680 3681 /* get length of packed record (stored as little endian) */ 3682 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3683 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3684 3685 if (bytes_read < sizeof (reclen) + reclen) 3686 break; 3687 3688 /* unpack record */ 3689 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3690 return (ENOMEM); 3691 bytes_read -= sizeof (reclen) + reclen; 3692 buf += sizeof (reclen) + reclen; 3693 3694 /* add record to nvlist array */ 3695 (*numrecords)++; 3696 if (ISP2(*numrecords + 1)) { 3697 *records = realloc(*records, 3698 *numrecords * 2 * sizeof (nvlist_t *)); 3699 } 3700 (*records)[*numrecords - 1] = nv; 3701 } 3702 3703 *leftover = bytes_read; 3704 return (0); 3705 } 3706 3707 /* 3708 * Retrieve the command history of a pool. 3709 */ 3710 int 3711 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3712 { 3713 char *buf; 3714 int buflen = 128 * 1024; 3715 uint64_t off = 0; 3716 nvlist_t **records = NULL; 3717 uint_t numrecords = 0; 3718 int err, i; 3719 3720 buf = malloc(buflen); 3721 if (buf == NULL) 3722 return (ENOMEM); 3723 do { 3724 uint64_t bytes_read = buflen; 3725 uint64_t leftover; 3726 3727 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3728 break; 3729 3730 /* if nothing else was read in, we're at EOF, just return */ 3731 if (!bytes_read) 3732 break; 3733 3734 if ((err = zpool_history_unpack(buf, bytes_read, 3735 &leftover, &records, &numrecords)) != 0) 3736 break; 3737 off -= leftover; 3738 if (leftover == bytes_read) { 3739 /* 3740 * no progress made, because buffer is not big enough 3741 * to hold this record; resize and retry. 
3742 */ 3743 buflen *= 2; 3744 free(buf); 3745 buf = malloc(buflen); 3746 if (buf == NULL) 3747 return (ENOMEM); 3748 } 3749 3750 /* CONSTCOND */ 3751 } while (1); 3752 3753 free(buf); 3754 3755 if (!err) { 3756 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3757 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3758 records, numrecords) == 0); 3759 } 3760 for (i = 0; i < numrecords; i++) 3761 nvlist_free(records[i]); 3762 free(records); 3763 3764 return (err); 3765 } 3766 3767 void 3768 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3769 char *pathname, size_t len) 3770 { 3771 zfs_cmd_t zc = { 0 }; 3772 boolean_t mounted = B_FALSE; 3773 char *mntpnt = NULL; 3774 char dsname[MAXNAMELEN]; 3775 3776 if (dsobj == 0) { 3777 /* special case for the MOS */ 3778 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3779 return; 3780 } 3781 3782 /* get the dataset's name */ 3783 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3784 zc.zc_obj = dsobj; 3785 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3786 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3787 /* just write out a path of two object numbers */ 3788 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3789 dsobj, obj); 3790 return; 3791 } 3792 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3793 3794 /* find out if the dataset is mounted */ 3795 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3796 3797 /* get the corrupted object's path */ 3798 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3799 zc.zc_obj = obj; 3800 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3801 &zc) == 0) { 3802 if (mounted) { 3803 (void) snprintf(pathname, len, "%s%s", mntpnt, 3804 zc.zc_value); 3805 } else { 3806 (void) snprintf(pathname, len, "%s:%s", 3807 dsname, zc.zc_value); 3808 } 3809 } else { 3810 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3811 } 3812 free(mntpnt); 3813 } 3814 3815 /* 3816 * Read the EFI label from the config, if a label does not exist then 3817 * pass back the error to the caller. If the caller has passed a non-NULL 3818 * diskaddr argument then we set it to the starting address of the EFI 3819 * partition. 
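 * The address passed back is the start of the first partition
 * (efi_parts[0]) on the device named by ZPOOL_CONFIG_PATH.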
3820 */ 3821 static int 3822 read_efi_label(nvlist_t *config, diskaddr_t *sb) 3823 { 3824 char *path; 3825 int fd; 3826 char diskname[MAXPATHLEN]; 3827 int err = -1; 3828 3829 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3830 return (err); 3831 3832 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, 3833 strrchr(path, '/')); 3834 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3835 struct dk_gpt *vtoc; 3836 3837 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3838 if (sb != NULL) 3839 *sb = vtoc->efi_parts[0].p_start; 3840 efi_free(vtoc); 3841 } 3842 (void) close(fd); 3843 } 3844 return (err); 3845 } 3846 3847 /* 3848 * determine where a partition starts on a disk in the current 3849 * configuration 3850 */ 3851 static diskaddr_t 3852 find_start_block(nvlist_t *config) 3853 { 3854 nvlist_t **child; 3855 uint_t c, children; 3856 diskaddr_t sb = MAXOFFSET_T; 3857 uint64_t wholedisk; 3858 3859 if (nvlist_lookup_nvlist_array(config, 3860 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3861 if (nvlist_lookup_uint64(config, 3862 ZPOOL_CONFIG_WHOLE_DISK, 3863 &wholedisk) != 0 || !wholedisk) { 3864 return (MAXOFFSET_T); 3865 } 3866 if (read_efi_label(config, &sb) < 0) 3867 sb = MAXOFFSET_T; 3868 return (sb); 3869 } 3870 3871 for (c = 0; c < children; c++) { 3872 sb = find_start_block(child[c]); 3873 if (sb != MAXOFFSET_T) { 3874 return (sb); 3875 } 3876 } 3877 return (MAXOFFSET_T); 3878 } 3879 3880 /* 3881 * Label an individual disk. The name provided is the short name, 3882 * stripped of any leading /dev path. 3883 */ 3884 int 3885 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name) 3886 { 3887 char path[MAXPATHLEN]; 3888 struct dk_gpt *vtoc; 3889 int fd; 3890 size_t resv = EFI_MIN_RESV_SIZE; 3891 uint64_t slice_size; 3892 diskaddr_t start_block; 3893 char errbuf[1024]; 3894 3895 /* prepare an error message just in case */ 3896 (void) snprintf(errbuf, sizeof (errbuf), 3897 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3898 3899 if (zhp) { 3900 nvlist_t *nvroot; 3901 3902 if (zpool_is_bootable(zhp)) { 3903 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3904 "EFI labeled devices are not supported on root " 3905 "pools.")); 3906 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3907 } 3908 3909 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3910 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3911 3912 if (zhp->zpool_start_block == 0) 3913 start_block = find_start_block(nvroot); 3914 else 3915 start_block = zhp->zpool_start_block; 3916 zhp->zpool_start_block = start_block; 3917 } else { 3918 /* new pool */ 3919 start_block = NEW_START_BLOCK; 3920 } 3921 3922 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, 3923 BACKUP_SLICE); 3924 3925 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3926 /* 3927 * This shouldn't happen. We've long since verified that this 3928 * is a valid device. 
3929 */ 3930 zfs_error_aux(hdl, 3931 dgettext(TEXT_DOMAIN, "unable to open device")); 3932 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 3933 } 3934 3935 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 3936 /* 3937 * The only way this can fail is if we run out of memory, or we 3938 * were unable to read the disk's capacity 3939 */ 3940 if (errno == ENOMEM) 3941 (void) no_memory(hdl); 3942 3943 (void) close(fd); 3944 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3945 "unable to read disk capacity"), name); 3946 3947 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 3948 } 3949 3950 slice_size = vtoc->efi_last_u_lba + 1; 3951 slice_size -= EFI_MIN_RESV_SIZE; 3952 if (start_block == MAXOFFSET_T) 3953 start_block = NEW_START_BLOCK; 3954 slice_size -= start_block; 3955 3956 vtoc->efi_parts[0].p_start = start_block; 3957 vtoc->efi_parts[0].p_size = slice_size; 3958 3959 /* 3960 * Why we use V_USR: V_BACKUP confuses users, and is considered 3961 * disposable by some EFI utilities (since EFI doesn't have a backup 3962 * slice). V_UNASSIGNED is supposed to be used only for zero size 3963 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 3964 * etc. were all pretty specific. V_USR is as close to reality as we 3965 * can get, in the absence of V_OTHER. 3966 */ 3967 vtoc->efi_parts[0].p_tag = V_USR; 3968 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 3969 3970 vtoc->efi_parts[8].p_start = slice_size + start_block; 3971 vtoc->efi_parts[8].p_size = resv; 3972 vtoc->efi_parts[8].p_tag = V_RESERVED; 3973 3974 if (efi_write(fd, vtoc) != 0) { 3975 /* 3976 * Some block drivers (like pcata) may not support EFI 3977 * GPT labels. Print out a helpful error message dir- 3978 * ecting the user to manually label the disk and give 3979 * a specific slice. 3980 */ 3981 (void) close(fd); 3982 efi_free(vtoc); 3983 3984 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3985 "try using fdisk(1M) and then provide a specific slice")); 3986 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 3987 } 3988 3989 (void) close(fd); 3990 efi_free(vtoc); 3991 return (0); 3992 } 3993 3994 static boolean_t 3995 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 3996 { 3997 char *type; 3998 nvlist_t **child; 3999 uint_t children, c; 4000 4001 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 4002 if (strcmp(type, VDEV_TYPE_FILE) == 0 || 4003 strcmp(type, VDEV_TYPE_HOLE) == 0 || 4004 strcmp(type, VDEV_TYPE_MISSING) == 0) { 4005 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4006 "vdev type '%s' is not supported"), type); 4007 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 4008 return (B_FALSE); 4009 } 4010 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 4011 &child, &children) == 0) { 4012 for (c = 0; c < children; c++) { 4013 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 4014 return (B_FALSE); 4015 } 4016 } 4017 return (B_TRUE); 4018 } 4019 4020 /* 4021 * Check if this zvol is allowable for use as a dump device; zero if 4022 * it is, > 0 if it isn't, < 0 if it isn't a zvol. 4023 * 4024 * Allowable storage configurations include mirrors, all raidz variants, and 4025 * pools with log, cache, and spare devices. Pools which are backed by files or 4026 * have missing/hole vdevs are not suitable. 
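 * Only the first top-level vdev is examined; its children are walked
 * recursively by supported_dump_vdev_type().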
4027 */ 4028 int 4029 zvol_check_dump_config(char *arg) 4030 { 4031 zpool_handle_t *zhp = NULL; 4032 nvlist_t *config, *nvroot; 4033 char *p, *volname; 4034 nvlist_t **top; 4035 uint_t toplevels; 4036 libzfs_handle_t *hdl; 4037 char errbuf[1024]; 4038 char poolname[ZPOOL_MAXNAMELEN]; 4039 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4040 int ret = 1; 4041 4042 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4043 return (-1); 4044 } 4045 4046 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4047 "dump is not supported on device '%s'"), arg); 4048 4049 if ((hdl = libzfs_init()) == NULL) 4050 return (1); 4051 libzfs_print_on_error(hdl, B_TRUE); 4052 4053 volname = arg + pathlen; 4054 4055 /* check the configuration of the pool */ 4056 if ((p = strchr(volname, '/')) == NULL) { 4057 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4058 "malformed dataset name")); 4059 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4060 return (1); 4061 } else if (p - volname >= ZFS_MAXNAMELEN) { 4062 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4063 "dataset name is too long")); 4064 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4065 return (1); 4066 } else { 4067 (void) strncpy(poolname, volname, p - volname); 4068 poolname[p - volname] = '\0'; 4069 } 4070 4071 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4072 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4073 "could not open pool '%s'"), poolname); 4074 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4075 goto out; 4076 } 4077 config = zpool_get_config(zhp, NULL); 4078 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4079 &nvroot) != 0) { 4080 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4081 "could not obtain vdev configuration for '%s'"), poolname); 4082 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4083 goto out; 4084 } 4085 4086 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4087 &top, &toplevels) == 0); 4088 4089 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4090 goto out; 4091 } 4092 ret = 0; 4093 4094 out: 4095 if (zhp) 4096 zpool_close(zhp); 4097 libzfs_fini(hdl); 4098 return (ret); 4099 } 4100
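/*
 * Illustrative sketch (not compiled into libzfs): one way a consumer
 * might use zpool_get_history() above to count a pool's history
 * records. The helper name and its minimal error handling are
 * hypothetical.
 */
#if 0
static void
example_print_history_count(const char *poolname)
{
	libzfs_handle_t *hdl;
	zpool_handle_t *zhp;
	nvlist_t *nvhis, **records;
	uint_t nrecords;

	if ((hdl = libzfs_init()) == NULL)
		return;
	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		libzfs_fini(hdl);
		return;
	}
	if (zpool_get_history(zhp, &nvhis) == 0) {
		/* ZPOOL_HIST_RECORD is the array filled in above */
		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
		    &records, &nrecords) == 0);
		(void) printf("%u history records for '%s'\n", nrecords,
		    poolname);
		nvlist_free(nvhis);
	}
	zpool_close(zhp);
	libzfs_fini(hdl);
}
#endif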