/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#define	BACKUP_SLICE	"s2"

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}
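
/*
 * Example (illustrative only, not part of the library): a caller might
 * combine the numeric property accessor with the state-name mappers above
 * roughly as follows.  The handle acquisition is sketched with zpool_open()
 * (defined later in this file); error handling is omitted for brevity.
 *
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *	zprop_source_t src;
 *	uint64_t vers = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
 *	(void) printf("version=%llu (%s)\n", (u_longlong_t)vers,
 *	    src == ZPROP_SRC_DEFAULT ? "default" : "local");
 *	zpool_close(zhp);
 */
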
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;
		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp,
			    NULL), ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
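
/*
 * Example (illustrative sketch, not library code): formatting a couple of
 * properties through zpool_get_prop().  "tank" and the handle are assumed
 * to exist.
 *
 *	char buf[ZPOOL_MAXPROPLEN];
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("capacity: %s\n", buf);	e.g. "42%"
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("health: %s\n", buf);	e.g. "ONLINE"
 */
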
/*
 * Check that the bootfs name has the same pool name as the pool it is
 * being set on.  Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZFS_MAX_DATASET_NAME_LEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to be in the same pool as the
			 * one the property is being set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		default:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s'(%d) not defined"), propname, prop);
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
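
/*
 * Example (illustrative sketch): callers such as zpool_set_prop() and
 * zpool_import_props() hand this validator a simple nvlist of name/value
 * string pairs, e.g.:
 *
 *	nvlist_t *props = NULL;
 *	verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(props, "comment", "scratch pool") == 0);
 *	verify(nvlist_add_string(props, "feature@lz4_compress",
 *	    ZFS_FEATURE_ENABLED) == 0);
 *
 * On success the returned nvlist has numeric properties parsed into their
 * native types (and feature@... entries stored as uint64 zeros, as above).
 */
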
/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
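
/*
 * Example (illustrative only): setting a property from application code.
 * "tank" is an arbitrary pool name.
 *
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *	if (zhp != NULL) {
 *		if (zpool_set_prop(zhp, "comment", "backup target") != 0)
 *			(void) fprintf(stderr, "%s\n",
 *			    libzfs_error_description(hdl));
 *		zpool_close(zhp);
 *	}
 */
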
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (int i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
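
/*
 * Example (illustrative only): querying a feature state by its "feature@"
 * property name.
 *
 *	char state[64];
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		(void) printf("async_destroy is %s\n", state);
 *
 * For supported features the result is one of the ZFS_FEATURE_* strings
 * used above ("disabled", "enabled" or "active").
 */
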
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
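
/*
 * Example (illustrative only): a typical open/inspect/close sequence using
 * the handle functions above.
 *
 *	zpool_handle_t *zhp;
 *	if ((zhp = zpool_open_canfail(hdl, "tank")) != NULL) {
 *		(void) printf("%s: %s\n", zpool_get_name(zhp),
 *		    zpool_pool_state_to_name(zpool_get_state(zhp)));
 *		zpool_close(zhp);
 *	}
 */
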
/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
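
/*
 * Example (illustrative sketch): the create/destroy lifecycle.  Building
 * the 'nvroot' vdev tree is normally done by the zpool(1M) command line
 * parser; the shape shown here is abbreviated and hypothetical.
 *
 *	nvlist_t *props = NULL, *nvroot = ...;	built elsewhere
 *	verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(props, "cachefile", "none") == 0);
 *	if (zpool_create(hdl, "tank", nvroot, props, NULL) == 0) {
 *		zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *		...
 *		(void) zpool_destroy(zhp, "destroy tank");
 *		zpool_close(zhp);
 *	}
 */
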
/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
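
/*
 * Example (illustrative sketch): the 'nvroot' handed to zpool_add() has the
 * same shape zpool_create() takes; hot spares and cache devices travel in
 * the ZPOOL_CONFIG_SPARES / ZPOOL_CONFIG_L2CACHE arrays that the version
 * checks above look for.  A hypothetical caller:
 *
 *	nvlist_t *nvroot = ...;		built by the CLI vdev parser
 *	if (zpool_add(zhp, nvroot) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zhp->zpool_hdl));
 */
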
/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
"Would discard" : "Discarded", loss); 1470 (void) printf(dgettext(TEXT_DOMAIN, 1471 "seconds of transactions.\n")); 1472 } 1473 } 1474 } 1475 1476 void 1477 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1478 nvlist_t *config) 1479 { 1480 nvlist_t *nv = NULL; 1481 int64_t loss = -1; 1482 uint64_t edata = UINT64_MAX; 1483 uint64_t rewindto; 1484 struct tm t; 1485 char timestr[128]; 1486 1487 if (!hdl->libzfs_printerr) 1488 return; 1489 1490 if (reason >= 0) 1491 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1492 else 1493 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1494 1495 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1496 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1497 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 || 1498 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1499 goto no_info; 1500 1501 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1502 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1503 &edata); 1504 1505 (void) printf(dgettext(TEXT_DOMAIN, 1506 "Recovery is possible, but will result in some data loss.\n")); 1507 1508 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1509 strftime(timestr, 128, 0, &t) != 0) { 1510 (void) printf(dgettext(TEXT_DOMAIN, 1511 "\tReturning the pool to its state as of %s\n" 1512 "\tshould correct the problem. "), 1513 timestr); 1514 } else { 1515 (void) printf(dgettext(TEXT_DOMAIN, 1516 "\tReverting the pool to an earlier state " 1517 "should correct the problem.\n\t")); 1518 } 1519 1520 if (loss > 120) { 1521 (void) printf(dgettext(TEXT_DOMAIN, 1522 "Approximately %lld minutes of data\n" 1523 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1524 } else if (loss > 0) { 1525 (void) printf(dgettext(TEXT_DOMAIN, 1526 "Approximately %lld seconds of data\n" 1527 "\tmust be discarded, irreversibly. "), loss); 1528 } 1529 if (edata != 0 && edata != UINT64_MAX) { 1530 if (edata == 1) { 1531 (void) printf(dgettext(TEXT_DOMAIN, 1532 "After rewind, at least\n" 1533 "\tone persistent user-data error will remain. ")); 1534 } else { 1535 (void) printf(dgettext(TEXT_DOMAIN, 1536 "After rewind, several\n" 1537 "\tpersistent user-data errors will remain. ")); 1538 } 1539 } 1540 (void) printf(dgettext(TEXT_DOMAIN, 1541 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1542 reason >= 0 ? "clear" : "import", name); 1543 1544 (void) printf(dgettext(TEXT_DOMAIN, 1545 "A scrub of the pool\n" 1546 "\tis strongly recommended after recovery.\n")); 1547 return; 1548 1549 no_info: 1550 (void) printf(dgettext(TEXT_DOMAIN, 1551 "Destroy and re-create the pool from\n\ta backup source.\n")); 1552 } 1553 1554 /* 1555 * zpool_import() is a contracted interface. Should be kept the same 1556 * if possible. 1557 * 1558 * Applications should use zpool_import_props() to import a pool with 1559 * new properties value to be set. 

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}
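
/*
 * Example (illustrative only): importing a discovered pool under an
 * alternate root.  The 'config' nvlist is assumed to have come from a
 * discovery routine such as zpool_find_import().
 *
 *	nvlist_t *config = ...;		from pool discovery
 *	if (zpool_import(hdl, config, NULL, "/mnt") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */
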
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;
		case ENAMETOOLONG:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
			break;
		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
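
/*
 * Example (illustrative only): kicking off and cancelling a scrub.
 *
 *	if (zpool_scan(zhp, POOL_SCAN_SCRUB) != 0)
 *		(void) fprintf(stderr, "scrub failed to start\n");
 *	...
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE);	cancel the scan
 */
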
/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string. Users of this are expected to do their own
 * verification of the s# part.
 */
#define	CTD_CHECK(str)	(str && str[0] == 'c' && isdigit(str[1]))

/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 * and the like.
 */
static int
ctd_check_path(char *str)
{
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)
				;
		}
		str = tmp + 1;
	}
	return (CTD_CHECK(str));
}

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; it is FALSE if it refers to an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "s0" or "s0/old". The "s0" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
			if (wholedisk) {
				int slen = strlen(srchval);
				int vlen = strlen(val);

				if (slen != vlen - 2)
					break;

				/*
				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.
				 */
				ASSERT(vlen >= 6);

				/*
				 * strings identical except trailing "s0"
				 */
				if (strcmp(&val[vlen - 2], "s0") == 0 &&
				    strncmp(srchval, val, slen) == 0)
					return (nv);

				/*
				 * strings identical except trailing "s0/old"
				 */
				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)
					return (nv);

				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}
2123  */
2124 nvlist_t *
2125 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2126     boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2127 {
2128     nvlist_t *search, *nvroot, *ret;
2129 
2130     verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2131     verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2132 
2133     verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2134         &nvroot) == 0);
2135 
2136     *avail_spare = B_FALSE;
2137     *l2cache = B_FALSE;
2138     if (log != NULL)
2139         *log = B_FALSE;
2140     ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2141     nvlist_free(search);
2142 
2143     return (ret);
2144 }
2145 
2146 /*
2147  * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2148  */
2149 boolean_t
2150 zpool_vdev_is_interior(const char *name)
2151 {
2152     if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2153         strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2154         return (B_TRUE);
2155     return (B_FALSE);
2156 }
2157 
2158 nvlist_t *
2159 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2160     boolean_t *l2cache, boolean_t *log)
2161 {
2162     char buf[MAXPATHLEN];
2163     char *end;
2164     nvlist_t *nvroot, *search, *ret;
2165     uint64_t guid;
2166 
2167     verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2168 
2169     guid = strtoull(path, &end, 10);
2170     if (guid != 0 && *end == '\0') {
2171         verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2172     } else if (zpool_vdev_is_interior(path)) {
2173         verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2174     } else if (path[0] != '/') {
2175         (void) snprintf(buf, sizeof (buf), "%s/%s", ZFS_DISK_ROOT,
2176             path);
2177         verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
2178     } else {
2179         verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2180     }
2181 
2182     verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2183         &nvroot) == 0);
2184 
2185     *avail_spare = B_FALSE;
2186     *l2cache = B_FALSE;
2187     if (log != NULL)
2188         *log = B_FALSE;
2189     ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2190     nvlist_free(search);
2191 
2192     return (ret);
2193 }
2194 
2195 static int
2196 vdev_online(nvlist_t *nv)
2197 {
2198     uint64_t ival;
2199 
2200     if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2201         nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2202         nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2203         return (0);
2204 
2205     return (1);
2206 }
2207 
2208 /*
2209  * Helper function for zpool_get_physpath().
2210  */
2211 static int
2212 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2213     size_t *bytes_written)
2214 {
2215     size_t bytes_left, pos, rsz;
2216     char *tmppath;
2217     const char *format;
2218 
2219     if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2220         &tmppath) != 0)
2221         return (EZFS_NODEVICE);
2222 
2223     pos = *bytes_written;
2224     bytes_left = physpath_size - pos;
2225     format = (pos == 0) ?
"%s" : " %s"; 2226 2227 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2228 *bytes_written += rsz; 2229 2230 if (rsz >= bytes_left) { 2231 /* if physpath was not copied properly, clear it */ 2232 if (bytes_left != 0) { 2233 physpath[pos] = 0; 2234 } 2235 return (EZFS_NOSPC); 2236 } 2237 return (0); 2238 } 2239 2240 static int 2241 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2242 size_t *rsz, boolean_t is_spare) 2243 { 2244 char *type; 2245 int ret; 2246 2247 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2248 return (EZFS_INVALCONFIG); 2249 2250 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2251 /* 2252 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2253 * For a spare vdev, we only want to boot from the active 2254 * spare device. 2255 */ 2256 if (is_spare) { 2257 uint64_t spare = 0; 2258 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2259 &spare); 2260 if (!spare) 2261 return (EZFS_INVALCONFIG); 2262 } 2263 2264 if (vdev_online(nv)) { 2265 if ((ret = vdev_get_one_physpath(nv, physpath, 2266 phypath_size, rsz)) != 0) 2267 return (ret); 2268 } 2269 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2270 strcmp(type, VDEV_TYPE_RAIDZ) == 0 || 2271 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2272 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2273 nvlist_t **child; 2274 uint_t count; 2275 int i, ret; 2276 2277 if (nvlist_lookup_nvlist_array(nv, 2278 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2279 return (EZFS_INVALCONFIG); 2280 2281 for (i = 0; i < count; i++) { 2282 ret = vdev_get_physpaths(child[i], physpath, 2283 phypath_size, rsz, is_spare); 2284 if (ret == EZFS_NOSPC) 2285 return (ret); 2286 } 2287 } 2288 2289 return (EZFS_POOL_INVALARG); 2290 } 2291 2292 /* 2293 * Get phys_path for a root pool config. 2294 * Return 0 on success; non-zero on failure. 2295 */ 2296 static int 2297 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2298 { 2299 size_t rsz; 2300 nvlist_t *vdev_root; 2301 nvlist_t **child; 2302 uint_t count; 2303 char *type; 2304 2305 rsz = 0; 2306 2307 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2308 &vdev_root) != 0) 2309 return (EZFS_INVALCONFIG); 2310 2311 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2312 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2313 &child, &count) != 0) 2314 return (EZFS_INVALCONFIG); 2315 2316 /* 2317 * root pool can only have a single top-level vdev. 2318 */ 2319 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1) 2320 return (EZFS_POOL_INVALARG); 2321 2322 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2323 B_FALSE); 2324 2325 /* No online devices */ 2326 if (rsz == 0) 2327 return (EZFS_NODEVICE); 2328 2329 return (0); 2330 } 2331 2332 /* 2333 * Get phys_path for a root pool 2334 * Return 0 on success; non-zero on failure. 2335 */ 2336 int 2337 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2338 { 2339 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2340 phypath_size)); 2341 } 2342 2343 /* 2344 * If the device has being dynamically expanded then we need to relabel 2345 * the disk to use the new unallocated space. 
2346  */
2347 static int
2348 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2349 {
2350     char path[MAXPATHLEN];
2351     char errbuf[1024];
2352     int fd, error;
2353     int (*_efi_use_whole_disk)(int);
2354 
2355     if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2356         "efi_use_whole_disk")) == NULL)
2357         return (-1);
2358 
2359     (void) snprintf(path, sizeof (path), "%s/%s", ZFS_RDISK_ROOT, name);
2360 
2361     if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2362         (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2363             "cannot relabel '%s': unable to open device"), name);
2364         return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2365     }
2366 
2367     /*
2368      * It's possible that we might encounter an error if the device
2369      * does not have any unallocated space left. If so, we simply
2370      * ignore that error and continue on.
2371      */
2372     error = _efi_use_whole_disk(fd);
2373     (void) close(fd);
2374     if (error && error != VT_ENOSPC) {
2375         (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2376             "cannot relabel '%s': unable to read disk capacity"), name);
2377         return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2378     }
2379     return (0);
2380 }
2381 
2382 /*
2383  * Bring the specified vdev online.  The 'flags' parameter is a set of the
2384  * ZFS_ONLINE_* flags.
2385  */
2386 int
2387 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2388     vdev_state_t *newstate)
2389 {
2390     zfs_cmd_t zc = { 0 };
2391     char msg[1024];
2392     nvlist_t *tgt;
2393     boolean_t avail_spare, l2cache, islog;
2394     libzfs_handle_t *hdl = zhp->zpool_hdl;
2395 
2396     if (flags & ZFS_ONLINE_EXPAND) {
2397         (void) snprintf(msg, sizeof (msg),
2398             dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2399     } else {
2400         (void) snprintf(msg, sizeof (msg),
2401             dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2402     }
2403 
2404     (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2405     if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2406         &islog)) == NULL)
2407         return (zfs_error(hdl, EZFS_NODEVICE, msg));
2408 
2409     verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2410 
2411     if (avail_spare)
2412         return (zfs_error(hdl, EZFS_ISSPARE, msg));
2413 
2414     if (flags & ZFS_ONLINE_EXPAND ||
2415         zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2416         char *pathname = NULL;
2417         uint64_t wholedisk = 0;
2418 
2419         (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2420             &wholedisk);
2421         verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
2422             &pathname) == 0);
2423 
2424         /*
2425          * XXX - L2ARC 1.0 devices can't support expansion.
2426          */
2427         if (l2cache) {
2428             zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2429                 "cannot expand cache devices"));
2430             return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2431         }
2432 
2433         if (wholedisk) {
2434             pathname += strlen(ZFS_DISK_ROOT) + 1;
2435             (void) zpool_relabel_disk(hdl, pathname);
2436         }
2437     }
2438 
2439     zc.zc_cookie = VDEV_STATE_ONLINE;
2440     zc.zc_obj = flags;
2441 
2442     if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2443         if (errno == EINVAL) {
2444             zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2445                 "from this pool into a new one.
Use '%s' " 2446 "instead"), "zpool detach"); 2447 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2448 } 2449 return (zpool_standard_error(hdl, errno, msg)); 2450 } 2451 2452 *newstate = zc.zc_cookie; 2453 return (0); 2454 } 2455 2456 /* 2457 * Take the specified vdev offline 2458 */ 2459 int 2460 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2461 { 2462 zfs_cmd_t zc = { 0 }; 2463 char msg[1024]; 2464 nvlist_t *tgt; 2465 boolean_t avail_spare, l2cache; 2466 libzfs_handle_t *hdl = zhp->zpool_hdl; 2467 2468 (void) snprintf(msg, sizeof (msg), 2469 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2470 2471 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2472 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2473 NULL)) == NULL) 2474 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2475 2476 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2477 2478 if (avail_spare) 2479 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2480 2481 zc.zc_cookie = VDEV_STATE_OFFLINE; 2482 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0; 2483 2484 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2485 return (0); 2486 2487 switch (errno) { 2488 case EBUSY: 2489 2490 /* 2491 * There are no other replicas of this device. 2492 */ 2493 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2494 2495 case EEXIST: 2496 /* 2497 * The log device has unplayed logs 2498 */ 2499 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2500 2501 default: 2502 return (zpool_standard_error(hdl, errno, msg)); 2503 } 2504 } 2505 2506 /* 2507 * Mark the given vdev faulted. 2508 */ 2509 int 2510 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2511 { 2512 zfs_cmd_t zc = { 0 }; 2513 char msg[1024]; 2514 libzfs_handle_t *hdl = zhp->zpool_hdl; 2515 2516 (void) snprintf(msg, sizeof (msg), 2517 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2518 2519 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2520 zc.zc_guid = guid; 2521 zc.zc_cookie = VDEV_STATE_FAULTED; 2522 zc.zc_obj = aux; 2523 2524 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2525 return (0); 2526 2527 switch (errno) { 2528 case EBUSY: 2529 2530 /* 2531 * There are no other replicas of this device. 2532 */ 2533 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2534 2535 default: 2536 return (zpool_standard_error(hdl, errno, msg)); 2537 } 2538 2539 } 2540 2541 /* 2542 * Mark the given vdev degraded. 2543 */ 2544 int 2545 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2546 { 2547 zfs_cmd_t zc = { 0 }; 2548 char msg[1024]; 2549 libzfs_handle_t *hdl = zhp->zpool_hdl; 2550 2551 (void) snprintf(msg, sizeof (msg), 2552 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2553 2554 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2555 zc.zc_guid = guid; 2556 zc.zc_cookie = VDEV_STATE_DEGRADED; 2557 zc.zc_obj = aux; 2558 2559 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2560 return (0); 2561 2562 return (zpool_standard_error(hdl, errno, msg)); 2563 } 2564 2565 /* 2566 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2567 * a hot spare. 
2568 */ 2569 static boolean_t 2570 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2571 { 2572 nvlist_t **child; 2573 uint_t c, children; 2574 char *type; 2575 2576 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2577 &children) == 0) { 2578 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2579 &type) == 0); 2580 2581 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2582 children == 2 && child[which] == tgt) 2583 return (B_TRUE); 2584 2585 for (c = 0; c < children; c++) 2586 if (is_replacing_spare(child[c], tgt, which)) 2587 return (B_TRUE); 2588 } 2589 2590 return (B_FALSE); 2591 } 2592 2593 /* 2594 * Attach new_disk (fully described by nvroot) to old_disk. 2595 * If 'replacing' is specified, the new disk will replace the old one. 2596 */ 2597 int 2598 zpool_vdev_attach(zpool_handle_t *zhp, 2599 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2600 { 2601 zfs_cmd_t zc = { 0 }; 2602 char msg[1024]; 2603 int ret; 2604 nvlist_t *tgt; 2605 boolean_t avail_spare, l2cache, islog; 2606 uint64_t val; 2607 char *newname; 2608 nvlist_t **child; 2609 uint_t children; 2610 nvlist_t *config_root; 2611 libzfs_handle_t *hdl = zhp->zpool_hdl; 2612 boolean_t rootpool = zpool_is_bootable(zhp); 2613 2614 if (replacing) 2615 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2616 "cannot replace %s with %s"), old_disk, new_disk); 2617 else 2618 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2619 "cannot attach %s to %s"), new_disk, old_disk); 2620 2621 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2622 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2623 &islog)) == 0) 2624 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2625 2626 if (avail_spare) 2627 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2628 2629 if (l2cache) 2630 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2631 2632 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2633 zc.zc_cookie = replacing; 2634 2635 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2636 &child, &children) != 0 || children != 1) { 2637 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2638 "new device must be a single disk")); 2639 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2640 } 2641 2642 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2643 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2644 2645 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2646 return (-1); 2647 2648 /* 2649 * If the target is a hot spare that has been swapped in, we can only 2650 * replace it with another hot spare. 2651 */ 2652 if (replacing && 2653 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2654 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2655 NULL) == NULL || !avail_spare) && 2656 is_replacing_spare(config_root, tgt, 1)) { 2657 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2658 "can only be replaced by another hot spare")); 2659 free(newname); 2660 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2661 } 2662 2663 free(newname); 2664 2665 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2666 return (-1); 2667 2668 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2669 2670 zcmd_free_nvlists(&zc); 2671 2672 if (ret == 0) { 2673 if (rootpool) { 2674 /* 2675 * XXX need a better way to prevent user from 2676 * booting up a half-baked vdev. 
2677 */ 2678 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2679 "sure to wait until resilver is done " 2680 "before rebooting.\n")); 2681 } 2682 return (0); 2683 } 2684 2685 switch (errno) { 2686 case ENOTSUP: 2687 /* 2688 * Can't attach to or replace this type of vdev. 2689 */ 2690 if (replacing) { 2691 uint64_t version = zpool_get_prop_int(zhp, 2692 ZPOOL_PROP_VERSION, NULL); 2693 2694 if (islog) 2695 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2696 "cannot replace a log with a spare")); 2697 else if (version >= SPA_VERSION_MULTI_REPLACE) 2698 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2699 "already in replacing/spare config; wait " 2700 "for completion or use 'zpool detach'")); 2701 else 2702 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2703 "cannot replace a replacing device")); 2704 } else { 2705 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2706 "can only attach to mirrors and top-level " 2707 "disks")); 2708 } 2709 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2710 break; 2711 2712 case EINVAL: 2713 /* 2714 * The new device must be a single disk. 2715 */ 2716 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2717 "new device must be a single disk")); 2718 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2719 break; 2720 2721 case EBUSY: 2722 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2723 new_disk); 2724 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2725 break; 2726 2727 case EOVERFLOW: 2728 /* 2729 * The new device is too small. 2730 */ 2731 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2732 "device is too small")); 2733 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2734 break; 2735 2736 case EDOM: 2737 /* 2738 * The new device has a different alignment requirement. 2739 */ 2740 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2741 "devices have different sector alignment")); 2742 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2743 break; 2744 2745 case ENAMETOOLONG: 2746 /* 2747 * The resulting top-level vdev spec won't fit in the label. 2748 */ 2749 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2750 break; 2751 2752 default: 2753 (void) zpool_standard_error(hdl, errno, msg); 2754 } 2755 2756 return (-1); 2757 } 2758 2759 /* 2760 * Detach the specified device. 2761 */ 2762 int 2763 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2764 { 2765 zfs_cmd_t zc = { 0 }; 2766 char msg[1024]; 2767 nvlist_t *tgt; 2768 boolean_t avail_spare, l2cache; 2769 libzfs_handle_t *hdl = zhp->zpool_hdl; 2770 2771 (void) snprintf(msg, sizeof (msg), 2772 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2773 2774 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2775 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2776 NULL)) == 0) 2777 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2778 2779 if (avail_spare) 2780 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2781 2782 if (l2cache) 2783 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2784 2785 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2786 2787 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2788 return (0); 2789 2790 switch (errno) { 2791 2792 case ENOTSUP: 2793 /* 2794 * Can't detach from this type of vdev. 2795 */ 2796 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2797 "applicable to mirror and replacing vdevs")); 2798 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2799 break; 2800 2801 case EBUSY: 2802 /* 2803 * There are no other replicas of this device. 
2804 */ 2805 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2806 break; 2807 2808 default: 2809 (void) zpool_standard_error(hdl, errno, msg); 2810 } 2811 2812 return (-1); 2813 } 2814 2815 /* 2816 * Find a mirror vdev in the source nvlist. 2817 * 2818 * The mchild array contains a list of disks in one of the top-level mirrors 2819 * of the source pool. The schild array contains a list of disks that the 2820 * user specified on the command line. We loop over the mchild array to 2821 * see if any entry in the schild array matches. 2822 * 2823 * If a disk in the mchild array is found in the schild array, we return 2824 * the index of that entry. Otherwise we return -1. 2825 */ 2826 static int 2827 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2828 nvlist_t **schild, uint_t schildren) 2829 { 2830 uint_t mc; 2831 2832 for (mc = 0; mc < mchildren; mc++) { 2833 uint_t sc; 2834 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2835 mchild[mc], B_FALSE); 2836 2837 for (sc = 0; sc < schildren; sc++) { 2838 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2839 schild[sc], B_FALSE); 2840 boolean_t result = (strcmp(mpath, spath) == 0); 2841 2842 free(spath); 2843 if (result) { 2844 free(mpath); 2845 return (mc); 2846 } 2847 } 2848 2849 free(mpath); 2850 } 2851 2852 return (-1); 2853 } 2854 2855 /* 2856 * Split a mirror pool. If newroot points to null, then a new nvlist 2857 * is generated and it is the responsibility of the caller to free it. 2858 */ 2859 int 2860 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2861 nvlist_t *props, splitflags_t flags) 2862 { 2863 zfs_cmd_t zc = { 0 }; 2864 char msg[1024]; 2865 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2866 nvlist_t **varray = NULL, *zc_props = NULL; 2867 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2868 libzfs_handle_t *hdl = zhp->zpool_hdl; 2869 uint64_t vers; 2870 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2871 int retval = 0; 2872 2873 (void) snprintf(msg, sizeof (msg), 2874 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2875 2876 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2877 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2878 2879 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2880 (void) fprintf(stderr, gettext("Internal error: unable to " 2881 "retrieve pool configuration\n")); 2882 return (-1); 2883 } 2884 2885 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2886 == 0); 2887 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2888 2889 if (props) { 2890 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2891 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2892 props, vers, flags, msg)) == NULL) 2893 return (-1); 2894 } 2895 2896 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2897 &children) != 0) { 2898 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2899 "Source pool is missing vdev tree")); 2900 nvlist_free(zc_props); 2901 return (-1); 2902 } 2903 2904 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2905 vcount = 0; 2906 2907 if (*newroot == NULL || 2908 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2909 &newchild, &newchildren) != 0) 2910 newchildren = 0; 2911 2912 for (c = 0; c < children; c++) { 2913 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2914 char *type; 2915 nvlist_t **mchild, *vdev; 2916 uint_t mchildren; 2917 int entry; 2918 2919 /* 2920 * Unlike cache & spares, slogs are stored in the 2921 * 
ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2922 */ 2923 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2924 &is_log); 2925 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2926 &is_hole); 2927 if (is_log || is_hole) { 2928 /* 2929 * Create a hole vdev and put it in the config. 2930 */ 2931 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2932 goto out; 2933 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2934 VDEV_TYPE_HOLE) != 0) 2935 goto out; 2936 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2937 1) != 0) 2938 goto out; 2939 if (lastlog == 0) 2940 lastlog = vcount; 2941 varray[vcount++] = vdev; 2942 continue; 2943 } 2944 lastlog = 0; 2945 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2946 == 0); 2947 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2948 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2949 "Source pool must be composed only of mirrors\n")); 2950 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2951 goto out; 2952 } 2953 2954 verify(nvlist_lookup_nvlist_array(child[c], 2955 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2956 2957 /* find or add an entry for this top-level vdev */ 2958 if (newchildren > 0 && 2959 (entry = find_vdev_entry(zhp, mchild, mchildren, 2960 newchild, newchildren)) >= 0) { 2961 /* We found a disk that the user specified. */ 2962 vdev = mchild[entry]; 2963 ++found; 2964 } else { 2965 /* User didn't specify a disk for this vdev. */ 2966 vdev = mchild[mchildren - 1]; 2967 } 2968 2969 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 2970 goto out; 2971 } 2972 2973 /* did we find every disk the user specified? */ 2974 if (found != newchildren) { 2975 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 2976 "include at most one disk from each mirror")); 2977 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2978 goto out; 2979 } 2980 2981 /* Prepare the nvlist for populating. */ 2982 if (*newroot == NULL) { 2983 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 2984 goto out; 2985 freelist = B_TRUE; 2986 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 2987 VDEV_TYPE_ROOT) != 0) 2988 goto out; 2989 } else { 2990 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 2991 } 2992 2993 /* Add all the children we found */ 2994 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 2995 lastlog == 0 ? vcount : lastlog) != 0) 2996 goto out; 2997 2998 /* 2999 * If we're just doing a dry run, exit now with success. 3000 */ 3001 if (flags.dryrun) { 3002 memory_err = B_FALSE; 3003 freelist = B_FALSE; 3004 goto out; 3005 } 3006 3007 /* now build up the config list & call the ioctl */ 3008 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3009 goto out; 3010 3011 if (nvlist_add_nvlist(newconfig, 3012 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3013 nvlist_add_string(newconfig, 3014 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3015 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3016 goto out; 3017 3018 /* 3019 * The new pool is automatically part of the namespace unless we 3020 * explicitly export it. 
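     *
     * For example (an illustrative sketch; the pool name is
     * hypothetical), a caller that wants the split pool exported,
     * as 'zpool split' does by default, leaves flags.import clear:
     *
     *    splitflags_t flags = { 0 };
     *    nvlist_t *newroot = NULL;
     *
     *    if (zpool_vdev_split(zhp, "newtank", &newroot,
     *        NULL, flags) == 0)
     *        nvlist_free(newroot);    (caller owns the new tree)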
3021      */
3022     if (!flags.import)
3023         zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3024     (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3025     (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3026     if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3027         goto out;
3028     if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3029         goto out;
3030 
3031     if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3032         retval = zpool_standard_error(hdl, errno, msg);
3033         goto out;
3034     }
3035 
3036     freelist = B_FALSE;
3037     memory_err = B_FALSE;
3038 
3039 out:
3040     if (varray != NULL) {
3041         int v;
3042 
3043         for (v = 0; v < vcount; v++)
3044             nvlist_free(varray[v]);
3045         free(varray);
3046     }
3047     zcmd_free_nvlists(&zc);
3048     nvlist_free(zc_props);
3049     nvlist_free(newconfig);
3050     if (freelist) {
3051         nvlist_free(*newroot);
3052         *newroot = NULL;
3053     }
3054 
3055     if (retval != 0)
3056         return (retval);
3057 
3058     if (memory_err)
3059         return (no_memory(hdl));
3060 
3061     return (0);
3062 }
3063 
3064 /*
3065  * Remove the given device.  Currently, this is supported only for hot spares,
3066  * level 2 cache devices, and log devices.
3067  */
3068 int
3069 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3070 {
3071     zfs_cmd_t zc = { 0 };
3072     char msg[1024];
3073     nvlist_t *tgt;
3074     boolean_t avail_spare, l2cache, islog;
3075     libzfs_handle_t *hdl = zhp->zpool_hdl;
3076     uint64_t version;
3077 
3078     (void) snprintf(msg, sizeof (msg),
3079         dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3080 
3081     (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3082     if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3083         &islog)) == 0)
3084         return (zfs_error(hdl, EZFS_NODEVICE, msg));
3085     /*
3086      * XXX - this should just go away.
3087      */
3088     if (!avail_spare && !l2cache && !islog) {
3089         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3090             "only inactive hot spares, cache, top-level, "
3091             "or log devices can be removed"));
3092         return (zfs_error(hdl, EZFS_NODEVICE, msg));
3093     }
3094 
3095     version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3096     if (islog && version < SPA_VERSION_HOLES) {
3097         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3098             "pool must be upgraded to support log removal"));
3099         return (zfs_error(hdl, EZFS_BADVERSION, msg));
3100     }
3101 
3102     verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3103 
3104     if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3105         return (0);
3106 
3107     return (zpool_standard_error(hdl, errno, msg));
3108 }
3109 
3110 /*
3111  * Clear the errors for the pool, or the particular device if specified.
3112  */
3113 int
3114 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3115 {
3116     zfs_cmd_t zc = { 0 };
3117     char msg[1024];
3118     nvlist_t *tgt;
3119     zpool_rewind_policy_t policy;
3120     boolean_t avail_spare, l2cache;
3121     libzfs_handle_t *hdl = zhp->zpool_hdl;
3122     nvlist_t *nvi = NULL;
3123     int error;
3124 
3125     if (path)
3126         (void) snprintf(msg, sizeof (msg),
3127             dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3128             path);
3129     else
3130         (void) snprintf(msg, sizeof (msg),
3131             dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3132             zhp->zpool_name);
3133 
3134     (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3135     if (path) {
3136         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3137             &l2cache, NULL)) == 0)
3138             return (zfs_error(hdl, EZFS_NODEVICE, msg));
3139 
3140         /*
3141          * Don't allow error clearing for hot spares.  Do allow
3142          * error clearing for l2cache devices.
3143          */
3144         if (avail_spare)
3145             return (zfs_error(hdl, EZFS_ISSPARE, msg));
3146 
3147         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3148             &zc.zc_guid) == 0);
3149     }
3150 
3151     zpool_get_rewind_policy(rewindnvl, &policy);
3152     zc.zc_cookie = policy.zrp_request;
3153 
3154     if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3155         return (-1);
3156 
3157     if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3158         return (-1);
3159 
3160     while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3161         errno == ENOMEM) {
3162         if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3163             zcmd_free_nvlists(&zc);
3164             return (-1);
3165         }
3166     }
3167 
3168     if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3169         errno != EPERM && errno != EACCES)) {
3170         if (policy.zrp_request &
3171             (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3172             (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3173             zpool_rewind_exclaim(hdl, zc.zc_name,
3174                 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3175                 nvi);
3176             nvlist_free(nvi);
3177         }
3178         zcmd_free_nvlists(&zc);
3179         return (0);
3180     }
3181 
3182     zcmd_free_nvlists(&zc);
3183     return (zpool_standard_error(hdl, errno, msg));
3184 }
3185 
3186 /*
3187  * Similar to zpool_clear(), but takes a GUID (used by fmd).
3188  */
3189 int
3190 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3191 {
3192     zfs_cmd_t zc = { 0 };
3193     char msg[1024];
3194     libzfs_handle_t *hdl = zhp->zpool_hdl;
3195 
3196     (void) snprintf(msg, sizeof (msg),
3197         dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3198         guid);
3199 
3200     (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3201     zc.zc_guid = guid;
3202     zc.zc_cookie = ZPOOL_NO_REWIND;
3203 
3204     if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3205         return (0);
3206 
3207     return (zpool_standard_error(hdl, errno, msg));
3208 }
3209 
3210 /*
3211  * Change the GUID for a pool.
3212  */
3213 int
3214 zpool_reguid(zpool_handle_t *zhp)
3215 {
3216     char msg[1024];
3217     libzfs_handle_t *hdl = zhp->zpool_hdl;
3218     zfs_cmd_t zc = { 0 };
3219 
3220     (void) snprintf(msg, sizeof (msg),
3221         dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3222 
3223     (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3224     if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3225         return (0);
3226 
3227     return (zpool_standard_error(hdl, errno, msg));
3228 }
3229 
3230 /*
3231  * Reopen the pool.
3232  */
3233 int
3234 zpool_reopen(zpool_handle_t *zhp)
3235 {
3236     zfs_cmd_t zc = { 0 };
3237     char msg[1024];
3238     libzfs_handle_t *hdl = zhp->zpool_hdl;
3239 
3240     (void) snprintf(msg, sizeof (msg),
3241         dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3242         zhp->zpool_name);
3243 
3244     (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3245     if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3246         return (0);
3247     return (zpool_standard_error(hdl, errno, msg));
3248 }
3249 
3250 /*
3251  * Convert from a devid string to a path.
3252  */
3253 static char *
3254 devid_to_path(char *devid_str)
3255 {
3256     ddi_devid_t devid;
3257     char *minor;
3258     char *path;
3259     devid_nmlist_t *list = NULL;
3260     int ret;
3261 
3262     if (devid_str_decode(devid_str, &devid, &minor) != 0)
3263         return (NULL);
3264 
3265     ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3266 
3267     devid_str_free(minor);
3268     devid_free(devid);
3269 
3270     if (ret != 0)
3271         return (NULL);
3272 
3273     /*
3274      * In case the strdup() fails, we will just return NULL below.
3275 */ 3276 path = strdup(list[0].devname); 3277 3278 devid_free_nmlist(list); 3279 3280 return (path); 3281 } 3282 3283 /* 3284 * Convert from a path to a devid string. 3285 */ 3286 static char * 3287 path_to_devid(const char *path) 3288 { 3289 int fd; 3290 ddi_devid_t devid; 3291 char *minor, *ret; 3292 3293 if ((fd = open(path, O_RDONLY)) < 0) 3294 return (NULL); 3295 3296 minor = NULL; 3297 ret = NULL; 3298 if (devid_get(fd, &devid) == 0) { 3299 if (devid_get_minor_name(fd, &minor) == 0) 3300 ret = devid_str_encode(devid, minor); 3301 if (minor != NULL) 3302 devid_str_free(minor); 3303 devid_free(devid); 3304 } 3305 (void) close(fd); 3306 3307 return (ret); 3308 } 3309 3310 /* 3311 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3312 * ignore any failure here, since a common case is for an unprivileged user to 3313 * type 'zpool status', and we'll display the correct information anyway. 3314 */ 3315 static void 3316 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3317 { 3318 zfs_cmd_t zc = { 0 }; 3319 3320 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3321 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3322 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3323 &zc.zc_guid) == 0); 3324 3325 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3326 } 3327 3328 /* 3329 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3330 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3331 * We also check if this is a whole disk, in which case we strip off the 3332 * trailing 's0' slice name. 3333 * 3334 * This routine is also responsible for identifying when disks have been 3335 * reconfigured in a new location. The kernel will have opened the device by 3336 * devid, but the path will still refer to the old location. To catch this, we 3337 * first do a path -> devid translation (which is fast for the common case). If 3338 * the devid matches, we're done. If not, we do a reverse devid -> path 3339 * translation and issue the appropriate ioctl() to update the path of the vdev. 3340 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3341 * of these checks. 3342 */ 3343 char * 3344 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3345 boolean_t verbose) 3346 { 3347 char *path, *devid; 3348 uint64_t value; 3349 char buf[64]; 3350 vdev_stat_t *vs; 3351 uint_t vsc; 3352 3353 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 3354 &value) == 0) { 3355 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3356 &value) == 0); 3357 (void) snprintf(buf, sizeof (buf), "%llu", 3358 (u_longlong_t)value); 3359 path = buf; 3360 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 3361 3362 /* 3363 * If the device is dead (faulted, offline, etc) then don't 3364 * bother opening it. Otherwise we may be forcing the user to 3365 * open a misbehaving device, which can have undesirable 3366 * effects. 3367 */ 3368 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3369 (uint64_t **)&vs, &vsc) != 0 || 3370 vs->vs_state >= VDEV_STATE_DEGRADED) && 3371 zhp != NULL && 3372 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3373 /* 3374 * Determine if the current path is correct. 
3375 */ 3376 char *newdevid = path_to_devid(path); 3377 3378 if (newdevid == NULL || 3379 strcmp(devid, newdevid) != 0) { 3380 char *newpath; 3381 3382 if ((newpath = devid_to_path(devid)) != NULL) { 3383 /* 3384 * Update the path appropriately. 3385 */ 3386 set_path(zhp, nv, newpath); 3387 if (nvlist_add_string(nv, 3388 ZPOOL_CONFIG_PATH, newpath) == 0) 3389 verify(nvlist_lookup_string(nv, 3390 ZPOOL_CONFIG_PATH, 3391 &path) == 0); 3392 free(newpath); 3393 } 3394 } 3395 3396 if (newdevid) 3397 devid_str_free(newdevid); 3398 } 3399 3400 if (strncmp(path, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0) 3401 path += strlen(ZFS_DISK_ROOTD); 3402 3403 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3404 &value) == 0 && value) { 3405 int pathlen = strlen(path); 3406 char *tmp = zfs_strdup(hdl, path); 3407 3408 /* 3409 * If it starts with c#, and ends with "s0", chop 3410 * the "s0" off, or if it ends with "s0/old", remove 3411 * the "s0" from the middle. 3412 */ 3413 if (CTD_CHECK(tmp)) { 3414 if (strcmp(&tmp[pathlen - 2], "s0") == 0) { 3415 tmp[pathlen - 2] = '\0'; 3416 } else if (pathlen > 6 && 3417 strcmp(&tmp[pathlen - 6], "s0/old") == 0) { 3418 (void) strcpy(&tmp[pathlen - 6], 3419 "/old"); 3420 } 3421 } 3422 return (tmp); 3423 } 3424 } else { 3425 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3426 3427 /* 3428 * If it's a raidz device, we need to stick in the parity level. 3429 */ 3430 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3431 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3432 &value) == 0); 3433 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3434 (u_longlong_t)value); 3435 path = buf; 3436 } 3437 3438 /* 3439 * We identify each top-level vdev by using a <type-id> 3440 * naming convention. 3441 */ 3442 if (verbose) { 3443 uint64_t id; 3444 3445 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3446 &id) == 0); 3447 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3448 (u_longlong_t)id); 3449 path = buf; 3450 } 3451 } 3452 3453 return (zfs_strdup(hdl, path)); 3454 } 3455 3456 static int 3457 zbookmark_mem_compare(const void *a, const void *b) 3458 { 3459 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 3460 } 3461 3462 /* 3463 * Retrieve the persistent error log, uniquify the members, and return to the 3464 * caller. 3465 */ 3466 int 3467 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3468 { 3469 zfs_cmd_t zc = { 0 }; 3470 uint64_t count; 3471 zbookmark_phys_t *zb = NULL; 3472 int i; 3473 3474 /* 3475 * Retrieve the raw error list from the kernel. If the number of errors 3476 * has increased, allocate more space and continue until we get the 3477 * entire list. 3478 */ 3479 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3480 &count) == 0); 3481 if (count == 0) 3482 return (0); 3483 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3484 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL) 3485 return (-1); 3486 zc.zc_nvlist_dst_size = count; 3487 (void) strcpy(zc.zc_name, zhp->zpool_name); 3488 for (;;) { 3489 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3490 &zc) != 0) { 3491 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3492 if (errno == ENOMEM) { 3493 void *dst; 3494 3495 count = zc.zc_nvlist_dst_size; 3496 dst = zfs_alloc(zhp->zpool_hdl, count * 3497 sizeof (zbookmark_phys_t)); 3498 if (dst == NULL) 3499 return (-1); 3500 zc.zc_nvlist_dst = (uintptr_t)dst; 3501 } else { 3502 return (-1); 3503 } 3504 } else { 3505 break; 3506 } 3507 } 3508 3509 /* 3510 * Sort the resulting bookmarks. 
This is a little confusing due to the
3511  * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
3512  * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3513  * _not_ copied as part of the process.  So we point the start of our
3514  * array appropriately and decrement the total number of elements.
3515  */
3516     zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3517         zc.zc_nvlist_dst_size;
3518     count -= zc.zc_nvlist_dst_size;
3519 
3520     qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3521 
3522     verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3523 
3524     /*
3525      * Fill in the nverrlistp with nvlists of dataset and object numbers.
3526      */
3527     for (i = 0; i < count; i++) {
3528         nvlist_t *nv;
3529 
3530         /* ignoring zb_blkid and zb_level for now */
3531         if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3532             zb[i-1].zb_object == zb[i].zb_object)
3533             continue;
3534 
3535         if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3536             goto nomem;
3537         if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3538             zb[i].zb_objset) != 0) {
3539             nvlist_free(nv);
3540             goto nomem;
3541         }
3542         if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3543             zb[i].zb_object) != 0) {
3544             nvlist_free(nv);
3545             goto nomem;
3546         }
3547         if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3548             nvlist_free(nv);
3549             goto nomem;
3550         }
3551         nvlist_free(nv);
3552     }
3553 
3554     free((void *)(uintptr_t)zc.zc_nvlist_dst);
3555     return (0);
3556 
3557 nomem:
3558     free((void *)(uintptr_t)zc.zc_nvlist_dst);
3559     return (no_memory(zhp->zpool_hdl));
3560 }
3561 
3562 /*
3563  * Upgrade a ZFS pool to the latest on-disk version.
3564  */
3565 int
3566 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3567 {
3568     zfs_cmd_t zc = { 0 };
3569     libzfs_handle_t *hdl = zhp->zpool_hdl;
3570 
3571     (void) strcpy(zc.zc_name, zhp->zpool_name);
3572     zc.zc_cookie = new_version;
3573 
3574     if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3575         return (zpool_standard_error_fmt(hdl, errno,
3576             dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3577             zhp->zpool_name));
3578     return (0);
3579 }
3580 
3581 void
3582 zfs_save_arguments(int argc, char **argv, char *string, int len)
3583 {
3584     (void) strlcpy(string, basename(argv[0]), len);
3585     for (int i = 1; i < argc; i++) {
3586         (void) strlcat(string, " ", len);
3587         (void) strlcat(string, argv[i], len);
3588     }
3589 }
3590 
3591 int
3592 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3593 {
3594     zfs_cmd_t zc = { 0 };
3595     nvlist_t *args;
3596     int err;
3597 
3598     args = fnvlist_alloc();
3599     fnvlist_add_string(args, "message", message);
3600     err = zcmd_write_src_nvlist(hdl, &zc, args);
3601     if (err == 0)
3602         err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3603     nvlist_free(args);
3604     zcmd_free_nvlists(&zc);
3605     return (err);
3606 }
3607 
3608 /*
3609  * Perform ioctl to get some command history of a pool.
3610  *
3611  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
3612  * logical offset of the history buffer to start reading from.
3613  *
3614  * Upon return, 'off' is the next logical offset to read from and
3615  * 'len' is the actual number of bytes read into 'buf'.
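 *
 * The intended calling pattern (see zpool_get_history() below) is a
 * simple cursor loop, sketched here:
 *
 *    char buf[128 * 1024];
 *    uint64_t off = 0, len;
 *
 *    do {
 *        len = sizeof (buf);
 *        if (get_history(zhp, buf, &off, &len) != 0)
 *            break;
 *        ...    (consume 'len' bytes from 'buf')
 *    } while (len != 0);    (len == 0 indicates EOF)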
3616 */ 3617 static int 3618 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3619 { 3620 zfs_cmd_t zc = { 0 }; 3621 libzfs_handle_t *hdl = zhp->zpool_hdl; 3622 3623 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3624 3625 zc.zc_history = (uint64_t)(uintptr_t)buf; 3626 zc.zc_history_len = *len; 3627 zc.zc_history_offset = *off; 3628 3629 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3630 switch (errno) { 3631 case EPERM: 3632 return (zfs_error_fmt(hdl, EZFS_PERM, 3633 dgettext(TEXT_DOMAIN, 3634 "cannot show history for pool '%s'"), 3635 zhp->zpool_name)); 3636 case ENOENT: 3637 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3638 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3639 "'%s'"), zhp->zpool_name)); 3640 case ENOTSUP: 3641 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3642 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3643 "'%s', pool must be upgraded"), zhp->zpool_name)); 3644 default: 3645 return (zpool_standard_error_fmt(hdl, errno, 3646 dgettext(TEXT_DOMAIN, 3647 "cannot get history for '%s'"), zhp->zpool_name)); 3648 } 3649 } 3650 3651 *len = zc.zc_history_len; 3652 *off = zc.zc_history_offset; 3653 3654 return (0); 3655 } 3656 3657 /* 3658 * Process the buffer of nvlists, unpacking and storing each nvlist record 3659 * into 'records'. 'leftover' is set to the number of bytes that weren't 3660 * processed as there wasn't a complete record. 3661 */ 3662 int 3663 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3664 nvlist_t ***records, uint_t *numrecords) 3665 { 3666 uint64_t reclen; 3667 nvlist_t *nv; 3668 int i; 3669 3670 while (bytes_read > sizeof (reclen)) { 3671 3672 /* get length of packed record (stored as little endian) */ 3673 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3674 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3675 3676 if (bytes_read < sizeof (reclen) + reclen) 3677 break; 3678 3679 /* unpack record */ 3680 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3681 return (ENOMEM); 3682 bytes_read -= sizeof (reclen) + reclen; 3683 buf += sizeof (reclen) + reclen; 3684 3685 /* add record to nvlist array */ 3686 (*numrecords)++; 3687 if (ISP2(*numrecords + 1)) { 3688 *records = realloc(*records, 3689 *numrecords * 2 * sizeof (nvlist_t *)); 3690 } 3691 (*records)[*numrecords - 1] = nv; 3692 } 3693 3694 *leftover = bytes_read; 3695 return (0); 3696 } 3697 3698 /* 3699 * Retrieve the command history of a pool. 3700 */ 3701 int 3702 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3703 { 3704 char *buf; 3705 int buflen = 128 * 1024; 3706 uint64_t off = 0; 3707 nvlist_t **records = NULL; 3708 uint_t numrecords = 0; 3709 int err, i; 3710 3711 buf = malloc(buflen); 3712 if (buf == NULL) 3713 return (ENOMEM); 3714 do { 3715 uint64_t bytes_read = buflen; 3716 uint64_t leftover; 3717 3718 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3719 break; 3720 3721 /* if nothing else was read in, we're at EOF, just return */ 3722 if (!bytes_read) 3723 break; 3724 3725 if ((err = zpool_history_unpack(buf, bytes_read, 3726 &leftover, &records, &numrecords)) != 0) 3727 break; 3728 off -= leftover; 3729 if (leftover == bytes_read) { 3730 /* 3731 * no progress made, because buffer is not big enough 3732 * to hold this record; resize and retry. 
3733 */ 3734 buflen *= 2; 3735 free(buf); 3736 buf = malloc(buflen); 3737 if (buf == NULL) 3738 return (ENOMEM); 3739 } 3740 3741 /* CONSTCOND */ 3742 } while (1); 3743 3744 free(buf); 3745 3746 if (!err) { 3747 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3748 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3749 records, numrecords) == 0); 3750 } 3751 for (i = 0; i < numrecords; i++) 3752 nvlist_free(records[i]); 3753 free(records); 3754 3755 return (err); 3756 } 3757 3758 void 3759 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3760 char *pathname, size_t len) 3761 { 3762 zfs_cmd_t zc = { 0 }; 3763 boolean_t mounted = B_FALSE; 3764 char *mntpnt = NULL; 3765 char dsname[ZFS_MAX_DATASET_NAME_LEN]; 3766 3767 if (dsobj == 0) { 3768 /* special case for the MOS */ 3769 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3770 return; 3771 } 3772 3773 /* get the dataset's name */ 3774 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3775 zc.zc_obj = dsobj; 3776 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3777 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3778 /* just write out a path of two object numbers */ 3779 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3780 dsobj, obj); 3781 return; 3782 } 3783 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3784 3785 /* find out if the dataset is mounted */ 3786 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3787 3788 /* get the corrupted object's path */ 3789 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3790 zc.zc_obj = obj; 3791 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3792 &zc) == 0) { 3793 if (mounted) { 3794 (void) snprintf(pathname, len, "%s%s", mntpnt, 3795 zc.zc_value); 3796 } else { 3797 (void) snprintf(pathname, len, "%s:%s", 3798 dsname, zc.zc_value); 3799 } 3800 } else { 3801 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3802 } 3803 free(mntpnt); 3804 } 3805 3806 /* 3807 * Read the EFI label from the config, if a label does not exist then 3808 * pass back the error to the caller. If the caller has passed a non-NULL 3809 * diskaddr argument then we set it to the starting address of the EFI 3810 * partition. 
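 *
 * For example (a sketch), a caller interested only in where the data
 * slice begins would do:
 *
 *    diskaddr_t start;
 *
 *    if (read_efi_label(nv, &start) >= 0)
 *        ...    (start == efi_parts[0].p_start)
 *
 * which is exactly how find_start_block() below uses it.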
3811 */ 3812 static int 3813 read_efi_label(nvlist_t *config, diskaddr_t *sb) 3814 { 3815 char *path; 3816 int fd; 3817 char diskname[MAXPATHLEN]; 3818 int err = -1; 3819 3820 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3821 return (err); 3822 3823 (void) snprintf(diskname, sizeof (diskname), "%s%s", ZFS_RDISK_ROOT, 3824 strrchr(path, '/')); 3825 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3826 struct dk_gpt *vtoc; 3827 3828 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3829 if (sb != NULL) 3830 *sb = vtoc->efi_parts[0].p_start; 3831 efi_free(vtoc); 3832 } 3833 (void) close(fd); 3834 } 3835 return (err); 3836 } 3837 3838 /* 3839 * determine where a partition starts on a disk in the current 3840 * configuration 3841 */ 3842 static diskaddr_t 3843 find_start_block(nvlist_t *config) 3844 { 3845 nvlist_t **child; 3846 uint_t c, children; 3847 diskaddr_t sb = MAXOFFSET_T; 3848 uint64_t wholedisk; 3849 3850 if (nvlist_lookup_nvlist_array(config, 3851 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3852 if (nvlist_lookup_uint64(config, 3853 ZPOOL_CONFIG_WHOLE_DISK, 3854 &wholedisk) != 0 || !wholedisk) { 3855 return (MAXOFFSET_T); 3856 } 3857 if (read_efi_label(config, &sb) < 0) 3858 sb = MAXOFFSET_T; 3859 return (sb); 3860 } 3861 3862 for (c = 0; c < children; c++) { 3863 sb = find_start_block(child[c]); 3864 if (sb != MAXOFFSET_T) { 3865 return (sb); 3866 } 3867 } 3868 return (MAXOFFSET_T); 3869 } 3870 3871 /* 3872 * Label an individual disk. The name provided is the short name, 3873 * stripped of any leading /dev path. 3874 */ 3875 int 3876 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name) 3877 { 3878 char path[MAXPATHLEN]; 3879 struct dk_gpt *vtoc; 3880 int fd; 3881 size_t resv = EFI_MIN_RESV_SIZE; 3882 uint64_t slice_size; 3883 diskaddr_t start_block; 3884 char errbuf[1024]; 3885 3886 /* prepare an error message just in case */ 3887 (void) snprintf(errbuf, sizeof (errbuf), 3888 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3889 3890 if (zhp) { 3891 nvlist_t *nvroot; 3892 3893 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3894 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3895 3896 if (zhp->zpool_start_block == 0) 3897 start_block = find_start_block(nvroot); 3898 else 3899 start_block = zhp->zpool_start_block; 3900 zhp->zpool_start_block = start_block; 3901 } else { 3902 /* new pool */ 3903 start_block = NEW_START_BLOCK; 3904 } 3905 3906 (void) snprintf(path, sizeof (path), "%s/%s%s", ZFS_RDISK_ROOT, name, 3907 BACKUP_SLICE); 3908 3909 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3910 /* 3911 * This shouldn't happen. We've long since verified that this 3912 * is a valid device. 
3913      */
3914         zfs_error_aux(hdl,
3915             dgettext(TEXT_DOMAIN, "unable to open device"));
3916         return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3917     }
3918 
3919     if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3920         /*
3921          * The only way this can fail is if we run out of memory, or we
3922          * were unable to read the disk's capacity.
3923          */
3924         if (errno == ENOMEM)
3925             (void) no_memory(hdl);
3926 
3927         (void) close(fd);
3928         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3929             "unable to read disk capacity"));
3930 
3931         return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3932     }
3933 
3934     slice_size = vtoc->efi_last_u_lba + 1;
3935     slice_size -= EFI_MIN_RESV_SIZE;
3936     if (start_block == MAXOFFSET_T)
3937         start_block = NEW_START_BLOCK;
3938     slice_size -= start_block;
3939 
3940     vtoc->efi_parts[0].p_start = start_block;
3941     vtoc->efi_parts[0].p_size = slice_size;
3942 
3943     /*
3944      * Why we use V_USR: V_BACKUP confuses users, and is considered
3945      * disposable by some EFI utilities (since EFI doesn't have a backup
3946      * slice).  V_UNASSIGNED is supposed to be used only for zero size
3947      * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
3948      * etc. were all pretty specific.  V_USR is as close to reality as we
3949      * can get, in the absence of V_OTHER.
3950      */
3951     vtoc->efi_parts[0].p_tag = V_USR;
3952     (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3953 
3954     vtoc->efi_parts[8].p_start = slice_size + start_block;
3955     vtoc->efi_parts[8].p_size = resv;
3956     vtoc->efi_parts[8].p_tag = V_RESERVED;
3957 
3958     if (efi_write(fd, vtoc) != 0) {
3959         /*
3960          * Some block drivers (like pcata) may not support EFI
3961          * GPT labels.  Print out a helpful error message
3962          * directing the user to manually label the disk and
3963          * give a specific slice.
3964          */
3965         (void) close(fd);
3966         efi_free(vtoc);
3967 
3968         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3969             "try using fdisk(1M) and then provide a specific slice"));
3970         return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3971     }
3972 
3973     (void) close(fd);
3974     efi_free(vtoc);
3975     return (0);
3976 }
3977 
3978 static boolean_t
3979 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3980 {
3981     char *type;
3982     nvlist_t **child;
3983     uint_t children, c;
3984 
3985     verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3986     if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
3987         strcmp(type, VDEV_TYPE_HOLE) == 0 ||
3988         strcmp(type, VDEV_TYPE_MISSING) == 0) {
3989         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3990             "vdev type '%s' is not supported"), type);
3991         (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3992         return (B_FALSE);
3993     }
3994     if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3995         &child, &children) == 0) {
3996         for (c = 0; c < children; c++) {
3997             if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3998                 return (B_FALSE);
3999         }
4000     }
4001     return (B_TRUE);
4002 }
4003 
4004 /*
4005  * Check if this zvol is allowable for use as a dump device; zero if
4006  * it is, > 0 if it isn't, < 0 if it isn't a zvol.
4007  *
4008  * Allowable storage configurations include mirrors, all raidz variants, and
4009  * pools with log, cache, and spare devices.  Pools which are backed by files or
4010  * have missing/hole vdevs are not suitable.
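 *
 * An illustrative check, e.g. as performed when configuring a dump
 * device with dumpadm(1M) (the zvol path shown is hypothetical):
 *
 *    if (zvol_check_dump_config("/dev/zvol/dsk/rpool/dump") == 0)
 *        ...    (zvol is usable as a dump device)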
4011 */ 4012 int 4013 zvol_check_dump_config(char *arg) 4014 { 4015 zpool_handle_t *zhp = NULL; 4016 nvlist_t *config, *nvroot; 4017 char *p, *volname; 4018 nvlist_t **top; 4019 uint_t toplevels; 4020 libzfs_handle_t *hdl; 4021 char errbuf[1024]; 4022 char poolname[ZFS_MAX_DATASET_NAME_LEN]; 4023 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4024 int ret = 1; 4025 4026 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4027 return (-1); 4028 } 4029 4030 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4031 "dump is not supported on device '%s'"), arg); 4032 4033 if ((hdl = libzfs_init()) == NULL) 4034 return (1); 4035 libzfs_print_on_error(hdl, B_TRUE); 4036 4037 volname = arg + pathlen; 4038 4039 /* check the configuration of the pool */ 4040 if ((p = strchr(volname, '/')) == NULL) { 4041 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4042 "malformed dataset name")); 4043 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4044 return (1); 4045 } else if (p - volname >= ZFS_MAX_DATASET_NAME_LEN) { 4046 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4047 "dataset name is too long")); 4048 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4049 return (1); 4050 } else { 4051 (void) strncpy(poolname, volname, p - volname); 4052 poolname[p - volname] = '\0'; 4053 } 4054 4055 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4056 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4057 "could not open pool '%s'"), poolname); 4058 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4059 goto out; 4060 } 4061 config = zpool_get_config(zhp, NULL); 4062 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4063 &nvroot) != 0) { 4064 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4065 "could not obtain vdev configuration for '%s'"), poolname); 4066 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4067 goto out; 4068 } 4069 4070 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4071 &top, &toplevels) == 0); 4072 4073 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4074 goto out; 4075 } 4076 ret = 0; 4077 4078 out: 4079 if (zhp) 4080 zpool_close(zhp); 4081 libzfs_fini(hdl); 4082 return (ret); 4083 } 4084
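/*
 * Example (an illustrative sketch, not part of the library itself):
 * walking the persistent error log with zpool_get_errlog() above and
 * resolving each entry to a pathname with zpool_obj_to_path(), the way
 * 'zpool status -v' presents corrupted files.
 *
 *    nvlist_t *nverrlist = NULL;
 *    nvpair_t *elem = NULL;
 *    char pathname[MAXPATHLEN * 2];
 *
 *    if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *        while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *            nvlist_t *nv;
 *            uint64_t dsobj, obj;
 *
 *            verify(nvpair_value_nvlist(elem, &nv) == 0);
 *            verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *                &dsobj) == 0);
 *            verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *                &obj) == 0);
 *            zpool_obj_to_path(zhp, dsobj, obj, pathname,
 *                sizeof (pathname));
 *        }
 *        nvlist_free(nverrlist);
 *    }
 */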