1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2017 by Delphix. All rights reserved. 25 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 26 * Copyright 2016 Nexenta Systems, Inc. 27 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com> 28 * Copyright (c) 2017 Datto Inc. 29 */ 30 31 #include <ctype.h> 32 #include <errno.h> 33 #include <devid.h> 34 #include <fcntl.h> 35 #include <libintl.h> 36 #include <stdio.h> 37 #include <stdlib.h> 38 #include <strings.h> 39 #include <unistd.h> 40 #include <libgen.h> 41 #include <sys/efi_partition.h> 42 #include <sys/vtoc.h> 43 #include <sys/zfs_ioctl.h> 44 #include <dlfcn.h> 45 46 #include "zfs_namecheck.h" 47 #include "zfs_prop.h" 48 #include "libzfs_impl.h" 49 #include "zfs_comutil.h" 50 #include "zfeature_common.h" 51 52 static int read_efi_label(nvlist_t *, diskaddr_t *, boolean_t *); 53 static boolean_t zpool_vdev_is_interior(const char *name); 54 55 #define BACKUP_SLICE "s2" 56 57 typedef struct prop_flags { 58 int create:1; /* Validate property on creation */ 59 int import:1; /* Validate property on import */ 60 } prop_flags_t; 61 62 /* 63 * ==================================================================== 64 * zpool property functions 65 * ==================================================================== 66 */ 67 68 static int 69 zpool_get_all_props(zpool_handle_t *zhp) 70 { 71 zfs_cmd_t zc = { 0 }; 72 libzfs_handle_t *hdl = zhp->zpool_hdl; 73 74 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 75 76 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 77 return (-1); 78 79 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 80 if (errno == ENOMEM) { 81 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 82 zcmd_free_nvlists(&zc); 83 return (-1); 84 } 85 } else { 86 zcmd_free_nvlists(&zc); 87 return (-1); 88 } 89 } 90 91 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 92 zcmd_free_nvlists(&zc); 93 return (-1); 94 } 95 96 zcmd_free_nvlists(&zc); 97 98 return (0); 99 } 100 101 static int 102 zpool_props_refresh(zpool_handle_t *zhp) 103 { 104 nvlist_t *old_props; 105 106 old_props = zhp->zpool_props; 107 108 if (zpool_get_all_props(zhp) != 0) 109 return (-1); 110 111 nvlist_free(old_props); 112 return (0); 113 } 114 115 static char * 116 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 117 zprop_source_t *src) 118 { 119 nvlist_t *nv, *nvl; 120 uint64_t ival; 121 char *value; 122 zprop_source_t source; 123 124 nvl = zhp->zpool_props; 125 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 126 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 127 source = ival; 128 
verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 129 } else { 130 source = ZPROP_SRC_DEFAULT; 131 if ((value = (char *)zpool_prop_default_string(prop)) == NULL) 132 value = "-"; 133 } 134 135 if (src) 136 *src = source; 137 138 return (value); 139 } 140 141 uint64_t 142 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 143 { 144 nvlist_t *nv, *nvl; 145 uint64_t value; 146 zprop_source_t source; 147 148 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 149 /* 150 * zpool_get_all_props() has most likely failed because 151 * the pool is faulted, but if all we need is the top level 152 * vdev's guid then get it from the zhp config nvlist. 153 */ 154 if ((prop == ZPOOL_PROP_GUID) && 155 (nvlist_lookup_nvlist(zhp->zpool_config, 156 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 157 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 158 == 0)) { 159 return (value); 160 } 161 return (zpool_prop_default_numeric(prop)); 162 } 163 164 nvl = zhp->zpool_props; 165 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 166 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 167 source = value; 168 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 169 } else { 170 source = ZPROP_SRC_DEFAULT; 171 value = zpool_prop_default_numeric(prop); 172 } 173 174 if (src) 175 *src = source; 176 177 return (value); 178 } 179 180 /* 181 * Map VDEV STATE to printed strings. 182 */ 183 const char * 184 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 185 { 186 switch (state) { 187 case VDEV_STATE_CLOSED: 188 case VDEV_STATE_OFFLINE: 189 return (gettext("OFFLINE")); 190 case VDEV_STATE_REMOVED: 191 return (gettext("REMOVED")); 192 case VDEV_STATE_CANT_OPEN: 193 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 194 return (gettext("FAULTED")); 195 else if (aux == VDEV_AUX_SPLIT_POOL) 196 return (gettext("SPLIT")); 197 else 198 return (gettext("UNAVAIL")); 199 case VDEV_STATE_FAULTED: 200 return (gettext("FAULTED")); 201 case VDEV_STATE_DEGRADED: 202 return (gettext("DEGRADED")); 203 case VDEV_STATE_HEALTHY: 204 return (gettext("ONLINE")); 205 206 default: 207 break; 208 } 209 210 return (gettext("UNKNOWN")); 211 } 212 213 /* 214 * Map POOL STATE to printed strings. 215 */ 216 const char * 217 zpool_pool_state_to_name(pool_state_t state) 218 { 219 switch (state) { 220 case POOL_STATE_ACTIVE: 221 return (gettext("ACTIVE")); 222 case POOL_STATE_EXPORTED: 223 return (gettext("EXPORTED")); 224 case POOL_STATE_DESTROYED: 225 return (gettext("DESTROYED")); 226 case POOL_STATE_SPARE: 227 return (gettext("SPARE")); 228 case POOL_STATE_L2CACHE: 229 return (gettext("L2CACHE")); 230 case POOL_STATE_UNINITIALIZED: 231 return (gettext("UNINITIALIZED")); 232 case POOL_STATE_UNAVAIL: 233 return (gettext("UNAVAIL")); 234 case POOL_STATE_POTENTIALLY_ACTIVE: 235 return (gettext("POTENTIALLY_ACTIVE")); 236 } 237 238 return (gettext("UNKNOWN")); 239 } 240 241 /* 242 * Get a zpool property value for 'prop' and return the value in 243 * a pre-allocated buffer. 
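 * 'srctype', if non-NULL, receives the property source, and 'literal'
 * requests raw numeric output instead of the human-readable form. A
 * purely illustrative call (not taken from this file) might be:
 *
 *	char buf[ZFS_MAXPROPLEN];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("health: %s\n", buf);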
244 */ 245 int 246 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 247 zprop_source_t *srctype, boolean_t literal) 248 { 249 uint64_t intval; 250 const char *strval; 251 zprop_source_t src = ZPROP_SRC_NONE; 252 nvlist_t *nvroot; 253 vdev_stat_t *vs; 254 uint_t vsc; 255 256 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 257 switch (prop) { 258 case ZPOOL_PROP_NAME: 259 (void) strlcpy(buf, zpool_get_name(zhp), len); 260 break; 261 262 case ZPOOL_PROP_HEALTH: 263 (void) strlcpy(buf, "FAULTED", len); 264 break; 265 266 case ZPOOL_PROP_GUID: 267 intval = zpool_get_prop_int(zhp, prop, &src); 268 (void) snprintf(buf, len, "%llu", intval); 269 break; 270 271 case ZPOOL_PROP_ALTROOT: 272 case ZPOOL_PROP_CACHEFILE: 273 case ZPOOL_PROP_COMMENT: 274 if (zhp->zpool_props != NULL || 275 zpool_get_all_props(zhp) == 0) { 276 (void) strlcpy(buf, 277 zpool_get_prop_string(zhp, prop, &src), 278 len); 279 break; 280 } 281 /* FALLTHROUGH */ 282 default: 283 (void) strlcpy(buf, "-", len); 284 break; 285 } 286 287 if (srctype != NULL) 288 *srctype = src; 289 return (0); 290 } 291 292 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 293 prop != ZPOOL_PROP_NAME) 294 return (-1); 295 296 switch (zpool_prop_get_type(prop)) { 297 case PROP_TYPE_STRING: 298 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 299 len); 300 break; 301 302 case PROP_TYPE_NUMBER: 303 intval = zpool_get_prop_int(zhp, prop, &src); 304 305 switch (prop) { 306 case ZPOOL_PROP_SIZE: 307 case ZPOOL_PROP_ALLOCATED: 308 case ZPOOL_PROP_FREE: 309 case ZPOOL_PROP_FREEING: 310 case ZPOOL_PROP_LEAKED: 311 if (literal) { 312 (void) snprintf(buf, len, "%llu", 313 (u_longlong_t)intval); 314 } else { 315 (void) zfs_nicenum(intval, buf, len); 316 } 317 break; 318 case ZPOOL_PROP_BOOTSIZE: 319 case ZPOOL_PROP_EXPANDSZ: 320 case ZPOOL_PROP_CHECKPOINT: 321 if (intval == 0) { 322 (void) strlcpy(buf, "-", len); 323 } else if (literal) { 324 (void) snprintf(buf, len, "%llu", 325 (u_longlong_t)intval); 326 } else { 327 (void) zfs_nicenum(intval, buf, len); 328 } 329 break; 330 case ZPOOL_PROP_CAPACITY: 331 if (literal) { 332 (void) snprintf(buf, len, "%llu", 333 (u_longlong_t)intval); 334 } else { 335 (void) snprintf(buf, len, "%llu%%", 336 (u_longlong_t)intval); 337 } 338 break; 339 case ZPOOL_PROP_FRAGMENTATION: 340 if (intval == UINT64_MAX) { 341 (void) strlcpy(buf, "-", len); 342 } else { 343 (void) snprintf(buf, len, "%llu%%", 344 (u_longlong_t)intval); 345 } 346 break; 347 case ZPOOL_PROP_DEDUPRATIO: 348 (void) snprintf(buf, len, "%llu.%02llux", 349 (u_longlong_t)(intval / 100), 350 (u_longlong_t)(intval % 100)); 351 break; 352 case ZPOOL_PROP_HEALTH: 353 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 354 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 355 verify(nvlist_lookup_uint64_array(nvroot, 356 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 357 == 0); 358 359 (void) strlcpy(buf, zpool_state_to_name(intval, 360 vs->vs_aux), len); 361 break; 362 case ZPOOL_PROP_VERSION: 363 if (intval >= SPA_VERSION_FEATURES) { 364 (void) snprintf(buf, len, "-"); 365 break; 366 } 367 /* FALLTHROUGH */ 368 default: 369 (void) snprintf(buf, len, "%llu", intval); 370 } 371 break; 372 373 case PROP_TYPE_INDEX: 374 intval = zpool_get_prop_int(zhp, prop, &src); 375 if (zpool_prop_index_to_string(prop, intval, &strval) 376 != 0) 377 return (-1); 378 (void) strlcpy(buf, strval, len); 379 break; 380 381 default: 382 abort(); 383 } 384 385 if (srctype) 386 *srctype = src; 387 388 return (0); 389 } 390 391 /* 392 * Check if 
the bootfs name has the same pool name as it is set to. 393 * Assuming bootfs is a valid dataset name. 394 */ 395 static boolean_t 396 bootfs_name_valid(const char *pool, char *bootfs) 397 { 398 int len = strlen(pool); 399 if (bootfs[0] == '\0') 400 return (B_TRUE); 401 402 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 403 return (B_FALSE); 404 405 if (strncmp(pool, bootfs, len) == 0 && 406 (bootfs[len] == '/' || bootfs[len] == '\0')) 407 return (B_TRUE); 408 409 return (B_FALSE); 410 } 411 412 boolean_t 413 zpool_is_bootable(zpool_handle_t *zhp) 414 { 415 char bootfs[ZFS_MAX_DATASET_NAME_LEN]; 416 417 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 418 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-", 419 sizeof (bootfs)) != 0); 420 } 421 422 423 /* 424 * Given an nvlist of zpool properties to be set, validate that they are 425 * correct, and parse any numeric properties (index, boolean, etc) if they are 426 * specified as strings. 427 */ 428 static nvlist_t * 429 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 430 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 431 { 432 nvpair_t *elem; 433 nvlist_t *retprops; 434 zpool_prop_t prop; 435 char *strval; 436 uint64_t intval; 437 char *slash, *check; 438 struct stat64 statbuf; 439 zpool_handle_t *zhp; 440 441 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 442 (void) no_memory(hdl); 443 return (NULL); 444 } 445 446 elem = NULL; 447 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 448 const char *propname = nvpair_name(elem); 449 450 prop = zpool_name_to_prop(propname); 451 if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) { 452 int err; 453 char *fname = strchr(propname, '@') + 1; 454 455 err = zfeature_lookup_name(fname, NULL); 456 if (err != 0) { 457 ASSERT3U(err, ==, ENOENT); 458 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 459 "invalid feature '%s'"), fname); 460 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 461 goto error; 462 } 463 464 if (nvpair_type(elem) != DATA_TYPE_STRING) { 465 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 466 "'%s' must be a string"), propname); 467 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 468 goto error; 469 } 470 471 (void) nvpair_value_string(elem, &strval); 472 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) { 473 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 474 "property '%s' can only be set to " 475 "'enabled'"), propname); 476 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 477 goto error; 478 } 479 480 if (nvlist_add_uint64(retprops, propname, 0) != 0) { 481 (void) no_memory(hdl); 482 goto error; 483 } 484 continue; 485 } 486 487 /* 488 * Make sure this property is valid and applies to this type. 489 */ 490 if (prop == ZPOOL_PROP_INVAL) { 491 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 492 "invalid property '%s'"), propname); 493 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 494 goto error; 495 } 496 497 if (zpool_prop_readonly(prop)) { 498 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 499 "is readonly"), propname); 500 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 501 goto error; 502 } 503 504 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 505 &strval, &intval, errbuf) != 0) 506 goto error; 507 508 /* 509 * Perform additional checking for specific properties. 
510 */ 511 switch (prop) { 512 case ZPOOL_PROP_VERSION: 513 if (intval < version || 514 !SPA_VERSION_IS_SUPPORTED(intval)) { 515 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 516 "property '%s' number %d is invalid."), 517 propname, intval); 518 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 519 goto error; 520 } 521 break; 522 523 case ZPOOL_PROP_BOOTSIZE: 524 if (!flags.create) { 525 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 526 "property '%s' can only be set during pool " 527 "creation"), propname); 528 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 529 goto error; 530 } 531 break; 532 533 case ZPOOL_PROP_BOOTFS: 534 if (flags.create || flags.import) { 535 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 536 "property '%s' cannot be set at creation " 537 "or import time"), propname); 538 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 539 goto error; 540 } 541 542 if (version < SPA_VERSION_BOOTFS) { 543 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 544 "pool must be upgraded to support " 545 "'%s' property"), propname); 546 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 547 goto error; 548 } 549 550 /* 551 * bootfs property value has to be a dataset name and 552 * the dataset has to be in the same pool as it sets to. 553 */ 554 if (!bootfs_name_valid(poolname, strval)) { 555 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 556 "is an invalid name"), strval); 557 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 558 goto error; 559 } 560 561 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 562 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 563 "could not open pool '%s'"), poolname); 564 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 565 goto error; 566 } 567 zpool_close(zhp); 568 break; 569 570 case ZPOOL_PROP_ALTROOT: 571 if (!flags.create && !flags.import) { 572 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 573 "property '%s' can only be set during pool " 574 "creation or import"), propname); 575 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 576 goto error; 577 } 578 579 if (strval[0] != '/') { 580 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 581 "bad alternate root '%s'"), strval); 582 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 583 goto error; 584 } 585 break; 586 587 case ZPOOL_PROP_CACHEFILE: 588 if (strval[0] == '\0') 589 break; 590 591 if (strcmp(strval, "none") == 0) 592 break; 593 594 if (strval[0] != '/') { 595 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 596 "property '%s' must be empty, an " 597 "absolute path, or 'none'"), propname); 598 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 599 goto error; 600 } 601 602 slash = strrchr(strval, '/'); 603 604 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 605 strcmp(slash, "/..") == 0) { 606 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 607 "'%s' is not a valid file"), strval); 608 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 609 goto error; 610 } 611 612 *slash = '\0'; 613 614 if (strval[0] != '\0' && 615 (stat64(strval, &statbuf) != 0 || 616 !S_ISDIR(statbuf.st_mode))) { 617 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 618 "'%s' is not a valid directory"), 619 strval); 620 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 621 goto error; 622 } 623 624 *slash = '/'; 625 break; 626 627 case ZPOOL_PROP_COMMENT: 628 for (check = strval; *check != '\0'; check++) { 629 if (!isprint(*check)) { 630 zfs_error_aux(hdl, 631 dgettext(TEXT_DOMAIN, 632 "comment may only have printable " 633 "characters")); 634 (void) zfs_error(hdl, EZFS_BADPROP, 635 errbuf); 636 goto error; 637 } 638 } 639 if (strlen(strval) > ZPROP_MAX_COMMENT) { 640 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 641 "comment 
must not exceed %d characters"), 642 ZPROP_MAX_COMMENT); 643 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 644 goto error; 645 } 646 break; 647 648 case ZPOOL_PROP_READONLY: 649 if (!flags.import) { 650 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 651 "property '%s' can only be set at " 652 "import time"), propname); 653 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 654 goto error; 655 } 656 break; 657 658 case ZPOOL_PROP_TNAME: 659 if (!flags.create) { 660 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 661 "property '%s' can only be set at " 662 "creation time"), propname); 663 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 664 goto error; 665 } 666 break; 667 668 case ZPOOL_PROP_MULTIHOST: 669 if (get_system_hostid() == 0) { 670 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 671 "requires a non-zero system hostid")); 672 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 673 goto error; 674 } 675 break; 676 677 default: 678 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 679 "property '%s'(%d) not defined"), propname, prop); 680 break; 681 } 682 } 683 684 return (retprops); 685 error: 686 nvlist_free(retprops); 687 return (NULL); 688 } 689 690 /* 691 * Set zpool property : propname=propval. 692 */ 693 int 694 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 695 { 696 zfs_cmd_t zc = { 0 }; 697 int ret = -1; 698 char errbuf[1024]; 699 nvlist_t *nvl = NULL; 700 nvlist_t *realprops; 701 uint64_t version; 702 prop_flags_t flags = { 0 }; 703 704 (void) snprintf(errbuf, sizeof (errbuf), 705 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 706 zhp->zpool_name); 707 708 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 709 return (no_memory(zhp->zpool_hdl)); 710 711 if (nvlist_add_string(nvl, propname, propval) != 0) { 712 nvlist_free(nvl); 713 return (no_memory(zhp->zpool_hdl)); 714 } 715 716 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 717 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 718 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 719 nvlist_free(nvl); 720 return (-1); 721 } 722 723 nvlist_free(nvl); 724 nvl = realprops; 725 726 /* 727 * Execute the corresponding ioctl() to set this property. 
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (int i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
862 */ 863 if (supported) { 864 int ret; 865 spa_feature_t fid; 866 867 ret = zfeature_lookup_name(feature, &fid); 868 if (ret != 0) { 869 (void) strlcpy(buf, "-", len); 870 return (ENOTSUP); 871 } 872 feature = spa_feature_table[fid].fi_guid; 873 } 874 875 if (nvlist_lookup_uint64(features, feature, &refcount) == 0) 876 found = B_TRUE; 877 878 if (supported) { 879 if (!found) { 880 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len); 881 } else { 882 if (refcount == 0) 883 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len); 884 else 885 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len); 886 } 887 } else { 888 if (found) { 889 if (refcount == 0) { 890 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE); 891 } else { 892 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY); 893 } 894 } else { 895 (void) strlcpy(buf, "-", len); 896 return (ENOTSUP); 897 } 898 } 899 900 return (0); 901 } 902 903 /* 904 * Don't start the slice at the default block of 34; many storage 905 * devices will use a stripe width of 128k, so start there instead. 906 */ 907 #define NEW_START_BLOCK 256 908 909 /* 910 * Validate the given pool name, optionally putting an extended error message in 911 * 'buf'. 912 */ 913 boolean_t 914 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 915 { 916 namecheck_err_t why; 917 char what; 918 int ret; 919 920 ret = pool_namecheck(pool, &why, &what); 921 922 /* 923 * The rules for reserved pool names were extended at a later point. 924 * But we need to support users with existing pools that may now be 925 * invalid. So we only check for this expanded set of names during a 926 * create (or import), and only in userland. 927 */ 928 if (ret == 0 && !isopen && 929 (strncmp(pool, "mirror", 6) == 0 || 930 strncmp(pool, "raidz", 5) == 0 || 931 strncmp(pool, "spare", 5) == 0 || 932 strcmp(pool, "log") == 0)) { 933 if (hdl != NULL) 934 zfs_error_aux(hdl, 935 dgettext(TEXT_DOMAIN, "name is reserved")); 936 return (B_FALSE); 937 } 938 939 940 if (ret != 0) { 941 if (hdl != NULL) { 942 switch (why) { 943 case NAME_ERR_TOOLONG: 944 zfs_error_aux(hdl, 945 dgettext(TEXT_DOMAIN, "name is too long")); 946 break; 947 948 case NAME_ERR_INVALCHAR: 949 zfs_error_aux(hdl, 950 dgettext(TEXT_DOMAIN, "invalid character " 951 "'%c' in pool name"), what); 952 break; 953 954 case NAME_ERR_NOLETTER: 955 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 956 "name must begin with a letter")); 957 break; 958 959 case NAME_ERR_RESERVED: 960 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 961 "name is reserved")); 962 break; 963 964 case NAME_ERR_DISKLIKE: 965 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 966 "pool name is reserved")); 967 break; 968 969 case NAME_ERR_LEADING_SLASH: 970 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 971 "leading slash in name")); 972 break; 973 974 case NAME_ERR_EMPTY_COMPONENT: 975 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 976 "empty component in name")); 977 break; 978 979 case NAME_ERR_TRAILING_SLASH: 980 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 981 "trailing slash in name")); 982 break; 983 984 case NAME_ERR_MULTIPLE_DELIMITERS: 985 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 986 "multiple '@' and/or '#' delimiters in " 987 "name")); 988 break; 989 990 default: 991 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 992 "(%d) not defined"), why); 993 break; 994 } 995 } 996 return (B_FALSE); 997 } 998 999 return (B_TRUE); 1000 } 1001 1002 /* 1003 * Open a handle to the given pool, even if the pool is currently in the FAULTED 1004 * state. 
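 * Returns NULL, with the error recorded on 'hdl', if the pool name is
 * invalid or the pool cannot be found at all. A hypothetical caller
 * (pool name illustrative):
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open_canfail(hdl, "tank")) != NULL) {
 *		... examine the pool, even if it is faulted ...
 *		zpool_close(zhp);
 *	}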
1005 */ 1006 zpool_handle_t * 1007 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 1008 { 1009 zpool_handle_t *zhp; 1010 boolean_t missing; 1011 1012 /* 1013 * Make sure the pool name is valid. 1014 */ 1015 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 1016 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1017 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 1018 pool); 1019 return (NULL); 1020 } 1021 1022 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1023 return (NULL); 1024 1025 zhp->zpool_hdl = hdl; 1026 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1027 1028 if (zpool_refresh_stats(zhp, &missing) != 0) { 1029 zpool_close(zhp); 1030 return (NULL); 1031 } 1032 1033 if (missing) { 1034 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 1035 (void) zfs_error_fmt(hdl, EZFS_NOENT, 1036 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 1037 zpool_close(zhp); 1038 return (NULL); 1039 } 1040 1041 return (zhp); 1042 } 1043 1044 /* 1045 * Like the above, but silent on error. Used when iterating over pools (because 1046 * the configuration cache may be out of date). 1047 */ 1048 int 1049 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 1050 { 1051 zpool_handle_t *zhp; 1052 boolean_t missing; 1053 1054 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1055 return (-1); 1056 1057 zhp->zpool_hdl = hdl; 1058 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1059 1060 if (zpool_refresh_stats(zhp, &missing) != 0) { 1061 zpool_close(zhp); 1062 return (-1); 1063 } 1064 1065 if (missing) { 1066 zpool_close(zhp); 1067 *ret = NULL; 1068 return (0); 1069 } 1070 1071 *ret = zhp; 1072 return (0); 1073 } 1074 1075 /* 1076 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 1077 * state. 1078 */ 1079 zpool_handle_t * 1080 zpool_open(libzfs_handle_t *hdl, const char *pool) 1081 { 1082 zpool_handle_t *zhp; 1083 1084 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 1085 return (NULL); 1086 1087 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 1088 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 1089 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 1090 zpool_close(zhp); 1091 return (NULL); 1092 } 1093 1094 return (zhp); 1095 } 1096 1097 /* 1098 * Close the handle. Simply frees the memory associated with the handle. 1099 */ 1100 void 1101 zpool_close(zpool_handle_t *zhp) 1102 { 1103 nvlist_free(zhp->zpool_config); 1104 nvlist_free(zhp->zpool_old_config); 1105 nvlist_free(zhp->zpool_props); 1106 free(zhp); 1107 } 1108 1109 /* 1110 * Return the name of the pool. 1111 */ 1112 const char * 1113 zpool_get_name(zpool_handle_t *zhp) 1114 { 1115 return (zhp->zpool_name); 1116 } 1117 1118 1119 /* 1120 * Return the state of the pool (ACTIVE or UNAVAILABLE) 1121 */ 1122 int 1123 zpool_get_state(zpool_handle_t *zhp) 1124 { 1125 return (zhp->zpool_state); 1126 } 1127 1128 /* 1129 * Create the named pool, using the provided vdev list. It is assumed 1130 * that the consumer has already validated the contents of the nvlist, so we 1131 * don't have to worry about error semantics. 
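 * 'props' and 'fsprops' may be NULL; when given, they carry pool
 * properties and root-dataset properties and are validated below before
 * the ioctl is issued. A rough sketch of a caller (pool name, vdev
 * tree, and property chosen purely for illustration):
 *
 *	nvlist_t *props = fnvlist_alloc();
 *
 *	fnvlist_add_string(props, "autoexpand", "on");
 *	ret = zpool_create(hdl, "tank", nvroot, props, NULL);
 *	nvlist_free(props);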
1132 */ 1133 int 1134 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 1135 nvlist_t *props, nvlist_t *fsprops) 1136 { 1137 zfs_cmd_t zc = { 0 }; 1138 nvlist_t *zc_fsprops = NULL; 1139 nvlist_t *zc_props = NULL; 1140 char msg[1024]; 1141 int ret = -1; 1142 1143 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1144 "cannot create '%s'"), pool); 1145 1146 if (!zpool_name_valid(hdl, B_FALSE, pool)) 1147 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 1148 1149 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1150 return (-1); 1151 1152 if (props) { 1153 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 1154 1155 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 1156 SPA_VERSION_1, flags, msg)) == NULL) { 1157 goto create_failed; 1158 } 1159 } 1160 1161 if (fsprops) { 1162 uint64_t zoned; 1163 char *zonestr; 1164 1165 zoned = ((nvlist_lookup_string(fsprops, 1166 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 1167 strcmp(zonestr, "on") == 0); 1168 1169 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM, 1170 fsprops, zoned, NULL, NULL, msg)) == NULL) { 1171 goto create_failed; 1172 } 1173 if (!zc_props && 1174 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 1175 goto create_failed; 1176 } 1177 if (nvlist_add_nvlist(zc_props, 1178 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 1179 goto create_failed; 1180 } 1181 } 1182 1183 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 1184 goto create_failed; 1185 1186 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 1187 1188 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 1189 1190 zcmd_free_nvlists(&zc); 1191 nvlist_free(zc_props); 1192 nvlist_free(zc_fsprops); 1193 1194 switch (errno) { 1195 case EBUSY: 1196 /* 1197 * This can happen if the user has specified the same 1198 * device multiple times. We can't reliably detect this 1199 * until we try to add it and see we already have a 1200 * label. 1201 */ 1202 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1203 "one or more vdevs refer to the same device")); 1204 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1205 1206 case ERANGE: 1207 /* 1208 * This happens if the record size is smaller or larger 1209 * than the allowed size range, or not a power of 2. 1210 * 1211 * NOTE: although zfs_valid_proplist is called earlier, 1212 * this case may have slipped through since the 1213 * pool does not exist yet and it is therefore 1214 * impossible to read properties e.g. max blocksize 1215 * from the pool. 1216 */ 1217 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1218 "record size invalid")); 1219 return (zfs_error(hdl, EZFS_BADPROP, msg)); 1220 1221 case EOVERFLOW: 1222 /* 1223 * This occurs when one of the devices is below 1224 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1225 * device was the problem device since there's no 1226 * reliable way to determine device size from userland. 
1227 */ 1228 { 1229 char buf[64]; 1230 1231 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1232 1233 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1234 "one or more devices is less than the " 1235 "minimum size (%s)"), buf); 1236 } 1237 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1238 1239 case ENOSPC: 1240 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1241 "one or more devices is out of space")); 1242 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1243 1244 case ENOTBLK: 1245 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1246 "cache device must be a disk or disk slice")); 1247 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1248 1249 default: 1250 return (zpool_standard_error(hdl, errno, msg)); 1251 } 1252 } 1253 1254 create_failed: 1255 zcmd_free_nvlists(&zc); 1256 nvlist_free(zc_props); 1257 nvlist_free(zc_fsprops); 1258 return (ret); 1259 } 1260 1261 /* 1262 * Destroy the given pool. It is up to the caller to ensure that there are no 1263 * datasets left in the pool. 1264 */ 1265 int 1266 zpool_destroy(zpool_handle_t *zhp, const char *log_str) 1267 { 1268 zfs_cmd_t zc = { 0 }; 1269 zfs_handle_t *zfp = NULL; 1270 libzfs_handle_t *hdl = zhp->zpool_hdl; 1271 char msg[1024]; 1272 1273 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1274 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1275 return (-1); 1276 1277 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1278 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1279 1280 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1281 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1282 "cannot destroy '%s'"), zhp->zpool_name); 1283 1284 if (errno == EROFS) { 1285 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1286 "one or more devices is read only")); 1287 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1288 } else { 1289 (void) zpool_standard_error(hdl, errno, msg); 1290 } 1291 1292 if (zfp) 1293 zfs_close(zfp); 1294 return (-1); 1295 } 1296 1297 if (zfp) { 1298 remove_mountpoint(zfp); 1299 zfs_close(zfp); 1300 } 1301 1302 return (0); 1303 } 1304 1305 /* 1306 * Create a checkpoint in the given pool. 1307 */ 1308 int 1309 zpool_checkpoint(zpool_handle_t *zhp) 1310 { 1311 libzfs_handle_t *hdl = zhp->zpool_hdl; 1312 char msg[1024]; 1313 int error; 1314 1315 error = lzc_pool_checkpoint(zhp->zpool_name); 1316 if (error != 0) { 1317 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1318 "cannot checkpoint '%s'"), zhp->zpool_name); 1319 (void) zpool_standard_error(hdl, error, msg); 1320 return (-1); 1321 } 1322 1323 return (0); 1324 } 1325 1326 /* 1327 * Discard the checkpoint from the given pool. 1328 */ 1329 int 1330 zpool_discard_checkpoint(zpool_handle_t *zhp) 1331 { 1332 libzfs_handle_t *hdl = zhp->zpool_hdl; 1333 char msg[1024]; 1334 int error; 1335 1336 error = lzc_pool_checkpoint_discard(zhp->zpool_name); 1337 if (error != 0) { 1338 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1339 "cannot discard checkpoint in '%s'"), zhp->zpool_name); 1340 (void) zpool_standard_error(hdl, error, msg); 1341 return (-1); 1342 } 1343 1344 return (0); 1345 } 1346 1347 /* 1348 * Add the given vdevs to the pool. The caller must have already performed the 1349 * necessary verification to ensure that the vdev specification is well-formed. 
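 * Adding hot spares or cache devices additionally requires that the pool
 * already be at SPA_VERSION_SPARES or SPA_VERSION_L2CACHE respectively;
 * both conditions are checked below before ZFS_IOC_VDEV_ADD is issued.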
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EINVAL:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid config; a pool with removing/removed "
			    "vdevs does not support adding raidz vdevs"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
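 * 'force' and 'hardforce' are handed to the kernel in zc_cookie and
 * zc_guid; the zpool_export() and zpool_export_force() wrappers below
 * select the normal and hard-force variants respectively.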
1462 */ 1463 static int 1464 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce, 1465 const char *log_str) 1466 { 1467 zfs_cmd_t zc = { 0 }; 1468 char msg[1024]; 1469 1470 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1471 "cannot export '%s'"), zhp->zpool_name); 1472 1473 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1474 zc.zc_cookie = force; 1475 zc.zc_guid = hardforce; 1476 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1477 1478 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1479 switch (errno) { 1480 case EXDEV: 1481 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1482 "use '-f' to override the following errors:\n" 1483 "'%s' has an active shared spare which could be" 1484 " used by other pools once '%s' is exported."), 1485 zhp->zpool_name, zhp->zpool_name); 1486 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1487 msg)); 1488 default: 1489 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1490 msg)); 1491 } 1492 } 1493 1494 return (0); 1495 } 1496 1497 int 1498 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str) 1499 { 1500 return (zpool_export_common(zhp, force, B_FALSE, log_str)); 1501 } 1502 1503 int 1504 zpool_export_force(zpool_handle_t *zhp, const char *log_str) 1505 { 1506 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str)); 1507 } 1508 1509 static void 1510 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, 1511 nvlist_t *config) 1512 { 1513 nvlist_t *nv = NULL; 1514 uint64_t rewindto; 1515 int64_t loss = -1; 1516 struct tm t; 1517 char timestr[128]; 1518 1519 if (!hdl->libzfs_printerr || config == NULL) 1520 return; 1521 1522 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1523 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) { 1524 return; 1525 } 1526 1527 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1528 return; 1529 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1530 1531 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1532 strftime(timestr, 128, 0, &t) != 0) { 1533 if (dryrun) { 1534 (void) printf(dgettext(TEXT_DOMAIN, 1535 "Would be able to return %s " 1536 "to its state as of %s.\n"), 1537 name, timestr); 1538 } else { 1539 (void) printf(dgettext(TEXT_DOMAIN, 1540 "Pool %s returned to its state as of %s.\n"), 1541 name, timestr); 1542 } 1543 if (loss > 120) { 1544 (void) printf(dgettext(TEXT_DOMAIN, 1545 "%s approximately %lld "), 1546 dryrun ? "Would discard" : "Discarded", 1547 (loss + 30) / 60); 1548 (void) printf(dgettext(TEXT_DOMAIN, 1549 "minutes of transactions.\n")); 1550 } else if (loss > 0) { 1551 (void) printf(dgettext(TEXT_DOMAIN, 1552 "%s approximately %lld "), 1553 dryrun ? 
"Would discard" : "Discarded", loss); 1554 (void) printf(dgettext(TEXT_DOMAIN, 1555 "seconds of transactions.\n")); 1556 } 1557 } 1558 } 1559 1560 void 1561 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1562 nvlist_t *config) 1563 { 1564 nvlist_t *nv = NULL; 1565 int64_t loss = -1; 1566 uint64_t edata = UINT64_MAX; 1567 uint64_t rewindto; 1568 struct tm t; 1569 char timestr[128]; 1570 1571 if (!hdl->libzfs_printerr) 1572 return; 1573 1574 if (reason >= 0) 1575 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1576 else 1577 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1578 1579 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1580 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1581 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 || 1582 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1583 goto no_info; 1584 1585 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1586 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1587 &edata); 1588 1589 (void) printf(dgettext(TEXT_DOMAIN, 1590 "Recovery is possible, but will result in some data loss.\n")); 1591 1592 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1593 strftime(timestr, 128, 0, &t) != 0) { 1594 (void) printf(dgettext(TEXT_DOMAIN, 1595 "\tReturning the pool to its state as of %s\n" 1596 "\tshould correct the problem. "), 1597 timestr); 1598 } else { 1599 (void) printf(dgettext(TEXT_DOMAIN, 1600 "\tReverting the pool to an earlier state " 1601 "should correct the problem.\n\t")); 1602 } 1603 1604 if (loss > 120) { 1605 (void) printf(dgettext(TEXT_DOMAIN, 1606 "Approximately %lld minutes of data\n" 1607 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1608 } else if (loss > 0) { 1609 (void) printf(dgettext(TEXT_DOMAIN, 1610 "Approximately %lld seconds of data\n" 1611 "\tmust be discarded, irreversibly. "), loss); 1612 } 1613 if (edata != 0 && edata != UINT64_MAX) { 1614 if (edata == 1) { 1615 (void) printf(dgettext(TEXT_DOMAIN, 1616 "After rewind, at least\n" 1617 "\tone persistent user-data error will remain. ")); 1618 } else { 1619 (void) printf(dgettext(TEXT_DOMAIN, 1620 "After rewind, several\n" 1621 "\tpersistent user-data errors will remain. ")); 1622 } 1623 } 1624 (void) printf(dgettext(TEXT_DOMAIN, 1625 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1626 reason >= 0 ? "clear" : "import", name); 1627 1628 (void) printf(dgettext(TEXT_DOMAIN, 1629 "A scrub of the pool\n" 1630 "\tis strongly recommended after recovery.\n")); 1631 return; 1632 1633 no_info: 1634 (void) printf(dgettext(TEXT_DOMAIN, 1635 "Destroy and re-create the pool from\n\ta backup source.\n")); 1636 } 1637 1638 /* 1639 * zpool_import() is a contracted interface. Should be kept the same 1640 * if possible. 1641 * 1642 * Applications should use zpool_import_props() to import a pool with 1643 * new properties value to be set. 
1644 */ 1645 int 1646 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1647 char *altroot) 1648 { 1649 nvlist_t *props = NULL; 1650 int ret; 1651 1652 if (altroot != NULL) { 1653 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1654 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1655 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1656 newname)); 1657 } 1658 1659 if (nvlist_add_string(props, 1660 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1661 nvlist_add_string(props, 1662 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1663 nvlist_free(props); 1664 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1665 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1666 newname)); 1667 } 1668 } 1669 1670 ret = zpool_import_props(hdl, config, newname, props, 1671 ZFS_IMPORT_NORMAL); 1672 nvlist_free(props); 1673 return (ret); 1674 } 1675 1676 static void 1677 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1678 int indent) 1679 { 1680 nvlist_t **child; 1681 uint_t c, children; 1682 char *vname; 1683 uint64_t is_log = 0; 1684 1685 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1686 &is_log); 1687 1688 if (name != NULL) 1689 (void) printf("\t%*s%s%s\n", indent, "", name, 1690 is_log ? " [log]" : ""); 1691 1692 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1693 &child, &children) != 0) 1694 return; 1695 1696 for (c = 0; c < children; c++) { 1697 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1698 print_vdev_tree(hdl, vname, child[c], indent + 2); 1699 free(vname); 1700 } 1701 } 1702 1703 void 1704 zpool_print_unsup_feat(nvlist_t *config) 1705 { 1706 nvlist_t *nvinfo, *unsup_feat; 1707 1708 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 1709 0); 1710 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT, 1711 &unsup_feat) == 0); 1712 1713 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL; 1714 nvp = nvlist_next_nvpair(unsup_feat, nvp)) { 1715 char *desc; 1716 1717 verify(nvpair_type(nvp) == DATA_TYPE_STRING); 1718 verify(nvpair_value_string(nvp, &desc) == 0); 1719 1720 if (strlen(desc) > 0) 1721 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc); 1722 else 1723 (void) printf("\t%s\n", nvpair_name(nvp)); 1724 } 1725 } 1726 1727 /* 1728 * Import the given pool using the known configuration and a list of 1729 * properties to be set. The configuration should have come from 1730 * zpool_find_import(). The 'newname' parameters control whether the pool 1731 * is imported with a different name. 
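 * 'flags' is a mask of ZFS_IMPORT_* values (ZFS_IMPORT_NORMAL in the
 * common case, as used by zpool_import() above) and is passed through to
 * the kernel in zc_cookie.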
1732 */ 1733 int 1734 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1735 nvlist_t *props, int flags) 1736 { 1737 zfs_cmd_t zc = { 0 }; 1738 zpool_load_policy_t policy; 1739 nvlist_t *nv = NULL; 1740 nvlist_t *nvinfo = NULL; 1741 nvlist_t *missing = NULL; 1742 char *thename; 1743 char *origname; 1744 int ret; 1745 int error = 0; 1746 char errbuf[1024]; 1747 1748 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1749 &origname) == 0); 1750 1751 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1752 "cannot import pool '%s'"), origname); 1753 1754 if (newname != NULL) { 1755 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1756 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1757 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1758 newname)); 1759 thename = (char *)newname; 1760 } else { 1761 thename = origname; 1762 } 1763 1764 if (props != NULL) { 1765 uint64_t version; 1766 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1767 1768 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1769 &version) == 0); 1770 1771 if ((props = zpool_valid_proplist(hdl, origname, 1772 props, version, flags, errbuf)) == NULL) 1773 return (-1); 1774 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1775 nvlist_free(props); 1776 return (-1); 1777 } 1778 nvlist_free(props); 1779 } 1780 1781 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1782 1783 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1784 &zc.zc_guid) == 0); 1785 1786 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1787 zcmd_free_nvlists(&zc); 1788 return (-1); 1789 } 1790 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1791 zcmd_free_nvlists(&zc); 1792 return (-1); 1793 } 1794 1795 zc.zc_cookie = flags; 1796 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1797 errno == ENOMEM) { 1798 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1799 zcmd_free_nvlists(&zc); 1800 return (-1); 1801 } 1802 } 1803 if (ret != 0) 1804 error = errno; 1805 1806 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1807 1808 zcmd_free_nvlists(&zc); 1809 1810 zpool_get_load_policy(config, &policy); 1811 1812 if (error) { 1813 char desc[1024]; 1814 char aux[256]; 1815 1816 /* 1817 * Dry-run failed, but we print out what success 1818 * looks like if we found a best txg 1819 */ 1820 if (policy.zlp_rewind & ZPOOL_TRY_REWIND) { 1821 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1822 B_TRUE, nv); 1823 nvlist_free(nv); 1824 return (-1); 1825 } 1826 1827 if (newname == NULL) 1828 (void) snprintf(desc, sizeof (desc), 1829 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1830 thename); 1831 else 1832 (void) snprintf(desc, sizeof (desc), 1833 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1834 origname, thename); 1835 1836 switch (error) { 1837 case ENOTSUP: 1838 if (nv != NULL && nvlist_lookup_nvlist(nv, 1839 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1840 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) { 1841 (void) printf(dgettext(TEXT_DOMAIN, "This " 1842 "pool uses the following feature(s) not " 1843 "supported by this system:\n")); 1844 zpool_print_unsup_feat(nv); 1845 if (nvlist_exists(nvinfo, 1846 ZPOOL_CONFIG_CAN_RDONLY)) { 1847 (void) printf(dgettext(TEXT_DOMAIN, 1848 "All unsupported features are only " 1849 "required for writing to the pool." 1850 "\nThe pool can be imported using " 1851 "'-o readonly=on'.\n")); 1852 } 1853 } 1854 /* 1855 * Unsupported version. 
1856 */ 1857 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1858 break; 1859 1860 case EREMOTEIO: 1861 if (nv != NULL && nvlist_lookup_nvlist(nv, 1862 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) { 1863 char *hostname = "<unknown>"; 1864 uint64_t hostid = 0; 1865 mmp_state_t mmp_state; 1866 1867 mmp_state = fnvlist_lookup_uint64(nvinfo, 1868 ZPOOL_CONFIG_MMP_STATE); 1869 1870 if (nvlist_exists(nvinfo, 1871 ZPOOL_CONFIG_MMP_HOSTNAME)) 1872 hostname = fnvlist_lookup_string(nvinfo, 1873 ZPOOL_CONFIG_MMP_HOSTNAME); 1874 1875 if (nvlist_exists(nvinfo, 1876 ZPOOL_CONFIG_MMP_HOSTID)) 1877 hostid = fnvlist_lookup_uint64(nvinfo, 1878 ZPOOL_CONFIG_MMP_HOSTID); 1879 1880 if (mmp_state == MMP_STATE_ACTIVE) { 1881 (void) snprintf(aux, sizeof (aux), 1882 dgettext(TEXT_DOMAIN, "pool is imp" 1883 "orted on host '%s' (hostid=%lx).\n" 1884 "Export the pool on the other " 1885 "system, then run 'zpool import'."), 1886 hostname, (unsigned long) hostid); 1887 } else if (mmp_state == MMP_STATE_NO_HOSTID) { 1888 (void) snprintf(aux, sizeof (aux), 1889 dgettext(TEXT_DOMAIN, "pool has " 1890 "the multihost property on and " 1891 "the\nsystem's hostid is not " 1892 "set.\n")); 1893 } 1894 1895 (void) zfs_error_aux(hdl, aux); 1896 } 1897 (void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc); 1898 break; 1899 1900 case EINVAL: 1901 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1902 break; 1903 1904 case EROFS: 1905 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1906 "one or more devices is read only")); 1907 (void) zfs_error(hdl, EZFS_BADDEV, desc); 1908 break; 1909 1910 case ENXIO: 1911 if (nv && nvlist_lookup_nvlist(nv, 1912 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1913 nvlist_lookup_nvlist(nvinfo, 1914 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1915 (void) printf(dgettext(TEXT_DOMAIN, 1916 "The devices below are missing or " 1917 "corrupted, use '-m' to import the pool " 1918 "anyway:\n")); 1919 print_vdev_tree(hdl, NULL, missing, 2); 1920 (void) printf("\n"); 1921 } 1922 (void) zpool_standard_error(hdl, error, desc); 1923 break; 1924 1925 case EEXIST: 1926 (void) zpool_standard_error(hdl, error, desc); 1927 break; 1928 case ENAMETOOLONG: 1929 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1930 "new name of at least one dataset is longer than " 1931 "the maximum allowable length")); 1932 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc); 1933 break; 1934 default: 1935 (void) zpool_standard_error(hdl, error, desc); 1936 zpool_explain_recover(hdl, 1937 newname ? origname : thename, -error, nv); 1938 break; 1939 } 1940 1941 nvlist_free(nv); 1942 ret = -1; 1943 } else { 1944 zpool_handle_t *zhp; 1945 1946 /* 1947 * This should never fail, but play it safe anyway. 1948 */ 1949 if (zpool_open_silent(hdl, thename, &zhp) != 0) 1950 ret = -1; 1951 else if (zhp != NULL) 1952 zpool_close(zhp); 1953 if (policy.zlp_rewind & 1954 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 1955 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1956 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv); 1957 } 1958 nvlist_free(nv); 1959 return (0); 1960 } 1961 1962 return (ret); 1963 } 1964 1965 /* 1966 * Scan the pool. 
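 * 'func' selects the operation (e.g. POOL_SCAN_SCRUB, or POOL_SCAN_NONE
 * to cancel a scrub in progress) and 'cmd' refines a scrub request
 * (POOL_SCRUB_NORMAL or POOL_SCRUB_PAUSE). For example, a plain scrub
 * could be requested with:
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL);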
1967 */ 1968 int 1969 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd) 1970 { 1971 zfs_cmd_t zc = { 0 }; 1972 char msg[1024]; 1973 int err; 1974 libzfs_handle_t *hdl = zhp->zpool_hdl; 1975 1976 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1977 zc.zc_cookie = func; 1978 zc.zc_flags = cmd; 1979 1980 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0) 1981 return (0); 1982 1983 err = errno; 1984 1985 /* ECANCELED on a scrub means we resumed a paused scrub */ 1986 if (err == ECANCELED && func == POOL_SCAN_SCRUB && 1987 cmd == POOL_SCRUB_NORMAL) 1988 return (0); 1989 1990 if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL) 1991 return (0); 1992 1993 if (func == POOL_SCAN_SCRUB) { 1994 if (cmd == POOL_SCRUB_PAUSE) { 1995 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1996 "cannot pause scrubbing %s"), zc.zc_name); 1997 } else { 1998 assert(cmd == POOL_SCRUB_NORMAL); 1999 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2000 "cannot scrub %s"), zc.zc_name); 2001 } 2002 } else if (func == POOL_SCAN_NONE) { 2003 (void) snprintf(msg, sizeof (msg), 2004 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 2005 zc.zc_name); 2006 } else { 2007 assert(!"unexpected result"); 2008 } 2009 2010 if (err == EBUSY) { 2011 nvlist_t *nvroot; 2012 pool_scan_stat_t *ps = NULL; 2013 uint_t psc; 2014 2015 verify(nvlist_lookup_nvlist(zhp->zpool_config, 2016 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 2017 (void) nvlist_lookup_uint64_array(nvroot, 2018 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 2019 if (ps && ps->pss_func == POOL_SCAN_SCRUB) { 2020 if (cmd == POOL_SCRUB_PAUSE) 2021 return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg)); 2022 else 2023 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 2024 } else { 2025 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 2026 } 2027 } else if (err == ENOENT) { 2028 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 2029 } else { 2030 return (zpool_standard_error(hdl, err, msg)); 2031 } 2032 } 2033 2034 static int 2035 xlate_init_err(int err) 2036 { 2037 switch (err) { 2038 case ENODEV: 2039 return (EZFS_NODEVICE); 2040 case EINVAL: 2041 case EROFS: 2042 return (EZFS_BADDEV); 2043 case EBUSY: 2044 return (EZFS_INITIALIZING); 2045 case ESRCH: 2046 return (EZFS_NO_INITIALIZE); 2047 } 2048 return (err); 2049 } 2050 2051 /* 2052 * Begin, suspend, or cancel the initialization (initializing of all free 2053 * blocks) for the given vdevs in the given pool. 2054 */ 2055 int 2056 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type, 2057 nvlist_t *vds) 2058 { 2059 char msg[1024]; 2060 libzfs_handle_t *hdl = zhp->zpool_hdl; 2061 2062 nvlist_t *errlist; 2063 2064 /* translate vdev names to guids */ 2065 nvlist_t *vdev_guids = fnvlist_alloc(); 2066 nvlist_t *guids_to_paths = fnvlist_alloc(); 2067 boolean_t spare, cache; 2068 nvlist_t *tgt; 2069 nvpair_t *elem; 2070 2071 for (elem = nvlist_next_nvpair(vds, NULL); elem != NULL; 2072 elem = nvlist_next_nvpair(vds, elem)) { 2073 char *vd_path = nvpair_name(elem); 2074 tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache, NULL); 2075 2076 if ((tgt == NULL) || cache || spare) { 2077 (void) snprintf(msg, sizeof (msg), 2078 dgettext(TEXT_DOMAIN, "cannot initialize '%s'"), 2079 vd_path); 2080 int err = (tgt == NULL) ? EZFS_NODEVICE : 2081 (spare ? 
EZFS_ISSPARE : EZFS_ISL2CACHE); 2082 fnvlist_free(vdev_guids); 2083 fnvlist_free(guids_to_paths); 2084 return (zfs_error(hdl, err, msg)); 2085 } 2086 2087 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 2088 fnvlist_add_uint64(vdev_guids, vd_path, guid); 2089 2090 (void) snprintf(msg, sizeof (msg), "%llu", guid); 2091 fnvlist_add_string(guids_to_paths, msg, vd_path); 2092 } 2093 2094 int err = lzc_initialize(zhp->zpool_name, cmd_type, vdev_guids, 2095 &errlist); 2096 fnvlist_free(vdev_guids); 2097 2098 if (err == 0) { 2099 fnvlist_free(guids_to_paths); 2100 return (0); 2101 } 2102 2103 nvlist_t *vd_errlist = NULL; 2104 if (errlist != NULL) { 2105 vd_errlist = fnvlist_lookup_nvlist(errlist, 2106 ZPOOL_INITIALIZE_VDEVS); 2107 } 2108 2109 (void) snprintf(msg, sizeof (msg), 2110 dgettext(TEXT_DOMAIN, "operation failed")); 2111 2112 for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL; 2113 elem = nvlist_next_nvpair(vd_errlist, elem)) { 2114 int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem)); 2115 char *path = fnvlist_lookup_string(guids_to_paths, 2116 nvpair_name(elem)); 2117 (void) zfs_error_fmt(hdl, vd_error, "cannot initialize '%s'", 2118 path); 2119 } 2120 2121 fnvlist_free(guids_to_paths); 2122 if (vd_errlist != NULL) 2123 return (-1); 2124 2125 return (zpool_standard_error(hdl, err, msg)); 2126 } 2127 2128 /* 2129 * This provides a very minimal check whether a given string is likely a 2130 * c#t#d# style string. Users of this are expected to do their own 2131 * verification of the s# part. 2132 */ 2133 #define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 2134 2135 /* 2136 * More elaborate version for ones which may start with "/dev/dsk/" 2137 * and the like. 2138 */ 2139 static int 2140 ctd_check_path(char *str) 2141 { 2142 /* 2143 * If it starts with a slash, check the last component. 2144 */ 2145 if (str && str[0] == '/') { 2146 char *tmp = strrchr(str, '/'); 2147 2148 /* 2149 * If it ends in "/old", check the second-to-last 2150 * component of the string instead. 2151 */ 2152 if (tmp != str && strcmp(tmp, "/old") == 0) { 2153 for (tmp--; *tmp != '/'; tmp--) 2154 ; 2155 } 2156 str = tmp + 1; 2157 } 2158 return (CTD_CHECK(str)); 2159 } 2160 2161 /* 2162 * Find a vdev that matches the search criteria specified. We use the 2163 * nvpair name to determine how we should look for the device. 2164 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 2165 * spare, but FALSE if it's an INUSE spare.
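 *
 * For illustration: a search nvlist holding a single DATA_TYPE_UINT64
 * pair named ZPOOL_CONFIG_GUID matches on vdev guid, while a single
 * string pair named ZPOOL_CONFIG_PATH (e.g. "/dev/dsk/c0t0d0s0") or
 * ZPOOL_CONFIG_TYPE (e.g. "mirror-4") matches on device path or
 * top-level vdev name; zpool_find_vdev() below builds these for callers.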
2166 */ 2167 static nvlist_t * 2168 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare, 2169 boolean_t *l2cache, boolean_t *log) 2170 { 2171 uint_t c, children; 2172 nvlist_t **child; 2173 nvlist_t *ret; 2174 uint64_t is_log; 2175 char *srchkey; 2176 nvpair_t *pair = nvlist_next_nvpair(search, NULL); 2177 2178 /* Nothing to look for */ 2179 if (search == NULL || pair == NULL) 2180 return (NULL); 2181 2182 /* Obtain the key we will use to search */ 2183 srchkey = nvpair_name(pair); 2184 2185 switch (nvpair_type(pair)) { 2186 case DATA_TYPE_UINT64: 2187 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) { 2188 uint64_t srchval, theguid; 2189 2190 verify(nvpair_value_uint64(pair, &srchval) == 0); 2191 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 2192 &theguid) == 0); 2193 if (theguid == srchval) 2194 return (nv); 2195 } 2196 break; 2197 2198 case DATA_TYPE_STRING: { 2199 char *srchval, *val; 2200 2201 verify(nvpair_value_string(pair, &srchval) == 0); 2202 if (nvlist_lookup_string(nv, srchkey, &val) != 0) 2203 break; 2204 2205 /* 2206 * Search for the requested value. Special cases: 2207 * 2208 * - ZPOOL_CONFIG_PATH for whole disk entries. To support 2209 * UEFI boot, these end in "s0" or "s0/old" or "s1" or 2210 * "s1/old". The "s0" or "s1" part is hidden from the user, 2211 * but included in the string, so this matches around it. 2212 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE). 2213 * 2214 * Otherwise, all other searches are simple string compares. 2215 */ 2216 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && 2217 ctd_check_path(val)) { 2218 uint64_t wholedisk = 0; 2219 2220 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 2221 &wholedisk); 2222 if (wholedisk) { 2223 int slen = strlen(srchval); 2224 int vlen = strlen(val); 2225 2226 if (slen != vlen - 2) 2227 break; 2228 2229 /* 2230 * make_leaf_vdev() should only set 2231 * wholedisk for ZPOOL_CONFIG_PATHs which 2232 * will include "/dev/dsk/", giving plenty of 2233 * room for the indices used next. 2234 */ 2235 ASSERT(vlen >= 6); 2236 2237 /* 2238 * strings identical except trailing "s0" 2239 */ 2240 if ((strcmp(&val[vlen - 2], "s0") == 0 || 2241 strcmp(&val[vlen - 2], "s1") == 0) && 2242 strncmp(srchval, val, slen) == 0) 2243 return (nv); 2244 2245 /* 2246 * strings identical except trailing "s0/old" 2247 */ 2248 if ((strcmp(&val[vlen - 6], "s0/old") == 0 || 2249 strcmp(&val[vlen - 6], "s1/old") == 0) && 2250 strcmp(&srchval[slen - 4], "/old") == 0 && 2251 strncmp(srchval, val, slen - 4) == 0) 2252 return (nv); 2253 2254 break; 2255 } 2256 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 2257 char *type, *idx, *end, *p; 2258 uint64_t id, vdev_id; 2259 2260 /* 2261 * Determine our vdev type, keeping in mind 2262 * that the srchval is composed of a type and 2263 * vdev id pair (i.e. mirror-4). 2264 */ 2265 if ((type = strdup(srchval)) == NULL) 2266 return (NULL); 2267 2268 if ((p = strrchr(type, '-')) == NULL) { 2269 free(type); 2270 break; 2271 } 2272 idx = p + 1; 2273 *p = '\0'; 2274 2275 /* 2276 * If the types don't match then keep looking. 2277 */ 2278 if (strncmp(val, type, strlen(val)) != 0) { 2279 free(type); 2280 break; 2281 } 2282 2283 verify(zpool_vdev_is_interior(type)); 2284 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 2285 &id) == 0); 2286 2287 errno = 0; 2288 vdev_id = strtoull(idx, &end, 10); 2289 2290 free(type); 2291 if (errno != 0) 2292 return (NULL); 2293 2294 /* 2295 * Now verify that we have the correct vdev id. 
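 * For example, a srchval of "mirror-4" matches only the child whose
 * ZPOOL_CONFIG_ID is 4.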
2296 */ 2297 if (vdev_id == id) 2298 return (nv); 2299 } 2300 2301 /* 2302 * Common case 2303 */ 2304 if (strcmp(srchval, val) == 0) 2305 return (nv); 2306 break; 2307 } 2308 2309 default: 2310 break; 2311 } 2312 2313 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2314 &child, &children) != 0) 2315 return (NULL); 2316 2317 for (c = 0; c < children; c++) { 2318 if ((ret = vdev_to_nvlist_iter(child[c], search, 2319 avail_spare, l2cache, NULL)) != NULL) { 2320 /* 2321 * The 'is_log' value is only set for the toplevel 2322 * vdev, not the leaf vdevs. So we always lookup the 2323 * log device from the root of the vdev tree (where 2324 * 'log' is non-NULL). 2325 */ 2326 if (log != NULL && 2327 nvlist_lookup_uint64(child[c], 2328 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 2329 is_log) { 2330 *log = B_TRUE; 2331 } 2332 return (ret); 2333 } 2334 } 2335 2336 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2337 &child, &children) == 0) { 2338 for (c = 0; c < children; c++) { 2339 if ((ret = vdev_to_nvlist_iter(child[c], search, 2340 avail_spare, l2cache, NULL)) != NULL) { 2341 *avail_spare = B_TRUE; 2342 return (ret); 2343 } 2344 } 2345 } 2346 2347 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2348 &child, &children) == 0) { 2349 for (c = 0; c < children; c++) { 2350 if ((ret = vdev_to_nvlist_iter(child[c], search, 2351 avail_spare, l2cache, NULL)) != NULL) { 2352 *l2cache = B_TRUE; 2353 return (ret); 2354 } 2355 } 2356 } 2357 2358 return (NULL); 2359 } 2360 2361 /* 2362 * Given a physical path (minus the "/devices" prefix), find the 2363 * associated vdev. 2364 */ 2365 nvlist_t * 2366 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 2367 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 2368 { 2369 nvlist_t *search, *nvroot, *ret; 2370 2371 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2372 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 2373 2374 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2375 &nvroot) == 0); 2376 2377 *avail_spare = B_FALSE; 2378 *l2cache = B_FALSE; 2379 if (log != NULL) 2380 *log = B_FALSE; 2381 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2382 nvlist_free(search); 2383 2384 return (ret); 2385 } 2386 2387 /* 2388 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 
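 * Names such as "mirror", "mirror-2", "raidz1-0", "replacing-1" and
 * "spare-0" all qualify; leaf names such as "c0t0d0" do not.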
2389 */ 2390 static boolean_t 2391 zpool_vdev_is_interior(const char *name) 2392 { 2393 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2394 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 || 2395 strncmp(name, 2396 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 || 2397 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2398 return (B_TRUE); 2399 return (B_FALSE); 2400 } 2401 2402 nvlist_t * 2403 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2404 boolean_t *l2cache, boolean_t *log) 2405 { 2406 char buf[MAXPATHLEN]; 2407 char *end; 2408 nvlist_t *nvroot, *search, *ret; 2409 uint64_t guid; 2410 2411 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2412 2413 guid = strtoull(path, &end, 10); 2414 if (guid != 0 && *end == '\0') { 2415 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2416 } else if (zpool_vdev_is_interior(path)) { 2417 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2418 } else if (path[0] != '/') { 2419 (void) snprintf(buf, sizeof (buf), "%s/%s", ZFS_DISK_ROOT, 2420 path); 2421 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2422 } else { 2423 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2424 } 2425 2426 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2427 &nvroot) == 0); 2428 2429 *avail_spare = B_FALSE; 2430 *l2cache = B_FALSE; 2431 if (log != NULL) 2432 *log = B_FALSE; 2433 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2434 nvlist_free(search); 2435 2436 return (ret); 2437 } 2438 2439 static int 2440 vdev_is_online(nvlist_t *nv) 2441 { 2442 uint64_t ival; 2443 2444 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2445 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2446 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2447 return (0); 2448 2449 return (1); 2450 } 2451 2452 /* 2453 * Helper function for zpool_get_physpaths(). 2454 */ 2455 static int 2456 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2457 size_t *bytes_written) 2458 { 2459 size_t bytes_left, pos, rsz; 2460 char *tmppath; 2461 const char *format; 2462 2463 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2464 &tmppath) != 0) 2465 return (EZFS_NODEVICE); 2466 2467 pos = *bytes_written; 2468 bytes_left = physpath_size - pos; 2469 format = (pos == 0) ? "%s" : " %s"; 2470 2471 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2472 *bytes_written += rsz; 2473 2474 if (rsz >= bytes_left) { 2475 /* if physpath was not copied properly, clear it */ 2476 if (bytes_left != 0) { 2477 physpath[pos] = 0; 2478 } 2479 return (EZFS_NOSPC); 2480 } 2481 return (0); 2482 } 2483 2484 static int 2485 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2486 size_t *rsz, boolean_t is_spare) 2487 { 2488 char *type; 2489 int ret; 2490 2491 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2492 return (EZFS_INVALCONFIG); 2493 2494 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2495 /* 2496 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2497 * For a spare vdev, we only want to boot from the active 2498 * spare device. 
2499 */ 2500 if (is_spare) { 2501 uint64_t spare = 0; 2502 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2503 &spare); 2504 if (!spare) 2505 return (EZFS_INVALCONFIG); 2506 } 2507 2508 if (vdev_is_online(nv)) { 2509 if ((ret = vdev_get_one_physpath(nv, physpath, 2510 phypath_size, rsz)) != 0) 2511 return (ret); 2512 } 2513 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2514 strcmp(type, VDEV_TYPE_RAIDZ) == 0 || 2515 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2516 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2517 nvlist_t **child; 2518 uint_t count; 2519 int i, ret; 2520 2521 if (nvlist_lookup_nvlist_array(nv, 2522 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2523 return (EZFS_INVALCONFIG); 2524 2525 for (i = 0; i < count; i++) { 2526 ret = vdev_get_physpaths(child[i], physpath, 2527 phypath_size, rsz, is_spare); 2528 if (ret == EZFS_NOSPC) 2529 return (ret); 2530 } 2531 } 2532 2533 return (EZFS_POOL_INVALARG); 2534 } 2535 2536 /* 2537 * Get phys_path for a root pool config. 2538 * Return 0 on success; non-zero on failure. 2539 */ 2540 static int 2541 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2542 { 2543 size_t rsz; 2544 nvlist_t *vdev_root; 2545 nvlist_t **child; 2546 uint_t count; 2547 char *type; 2548 2549 rsz = 0; 2550 2551 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2552 &vdev_root) != 0) 2553 return (EZFS_INVALCONFIG); 2554 2555 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2556 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2557 &child, &count) != 0) 2558 return (EZFS_INVALCONFIG); 2559 2560 /* 2561 * root pool can only have a single top-level vdev. 2562 */ 2563 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1) 2564 return (EZFS_POOL_INVALARG); 2565 2566 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2567 B_FALSE); 2568 2569 /* No online devices */ 2570 if (rsz == 0) 2571 return (EZFS_NODEVICE); 2572 2573 return (0); 2574 } 2575 2576 /* 2577 * Get phys_path for a root pool 2578 * Return 0 on success; non-zero on failure. 2579 */ 2580 int 2581 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2582 { 2583 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2584 phypath_size)); 2585 } 2586 2587 /* 2588 * If the device has being dynamically expanded then we need to relabel 2589 * the disk to use the new unallocated space. 2590 */ 2591 static int 2592 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2593 { 2594 char path[MAXPATHLEN]; 2595 char errbuf[1024]; 2596 int fd, error; 2597 int (*_efi_use_whole_disk)(int); 2598 2599 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2600 "efi_use_whole_disk")) == NULL) 2601 return (-1); 2602 2603 (void) snprintf(path, sizeof (path), "%s/%s", ZFS_RDISK_ROOT, name); 2604 2605 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2606 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2607 "relabel '%s': unable to open device"), name); 2608 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2609 } 2610 2611 /* 2612 * It's possible that we might encounter an error if the device 2613 * does not have any unallocated space left. If so, we simply 2614 * ignore that error and continue on. 
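 * (efi_use_whole_disk() reports that case as VT_ENOSPC, which is treated
 * as success below.)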
2615 */ 2616 error = _efi_use_whole_disk(fd); 2617 (void) close(fd); 2618 if (error && error != VT_ENOSPC) { 2619 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2620 "relabel '%s': unable to read disk capacity"), name); 2621 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2622 } 2623 return (0); 2624 } 2625 2626 /* 2627 * Bring the specified vdev online. The 'flags' parameter is a set of the 2628 * ZFS_ONLINE_* flags. 2629 */ 2630 int 2631 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2632 vdev_state_t *newstate) 2633 { 2634 zfs_cmd_t zc = { 0 }; 2635 char msg[1024]; 2636 char *pathname; 2637 nvlist_t *tgt; 2638 boolean_t avail_spare, l2cache, islog; 2639 libzfs_handle_t *hdl = zhp->zpool_hdl; 2640 2641 if (flags & ZFS_ONLINE_EXPAND) { 2642 (void) snprintf(msg, sizeof (msg), 2643 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2644 } else { 2645 (void) snprintf(msg, sizeof (msg), 2646 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2647 } 2648 2649 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2650 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2651 &islog)) == NULL) 2652 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2653 2654 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2655 2656 if (avail_spare) 2657 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2658 2659 if ((flags & ZFS_ONLINE_EXPAND || 2660 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) && 2661 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) { 2662 uint64_t wholedisk = 0; 2663 2664 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2665 &wholedisk); 2666 2667 /* 2668 * XXX - L2ARC 1.0 devices can't support expansion. 2669 */ 2670 if (l2cache) { 2671 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2672 "cannot expand cache devices")); 2673 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2674 } 2675 2676 if (wholedisk) { 2677 pathname += strlen(ZFS_DISK_ROOT) + 1; 2678 (void) zpool_relabel_disk(hdl, pathname); 2679 } 2680 } 2681 2682 zc.zc_cookie = VDEV_STATE_ONLINE; 2683 zc.zc_obj = flags; 2684 2685 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2686 if (errno == EINVAL) { 2687 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2688 "from this pool into a new one. Use '%s' " 2689 "instead"), "zpool detach"); 2690 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2691 } 2692 return (zpool_standard_error(hdl, errno, msg)); 2693 } 2694 2695 *newstate = zc.zc_cookie; 2696 return (0); 2697 } 2698 2699 /* 2700 * Take the specified vdev offline 2701 */ 2702 int 2703 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2704 { 2705 zfs_cmd_t zc = { 0 }; 2706 char msg[1024]; 2707 nvlist_t *tgt; 2708 boolean_t avail_spare, l2cache; 2709 libzfs_handle_t *hdl = zhp->zpool_hdl; 2710 2711 (void) snprintf(msg, sizeof (msg), 2712 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2713 2714 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2715 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2716 NULL)) == NULL) 2717 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2718 2719 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2720 2721 if (avail_spare) 2722 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2723 2724 zc.zc_cookie = VDEV_STATE_OFFLINE; 2725 zc.zc_obj = istmp ? 
ZFS_OFFLINE_TEMPORARY : 0; 2726 2727 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2728 return (0); 2729 2730 switch (errno) { 2731 case EBUSY: 2732 2733 /* 2734 * There are no other replicas of this device. 2735 */ 2736 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2737 2738 case EEXIST: 2739 /* 2740 * The log device has unplayed logs 2741 */ 2742 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2743 2744 default: 2745 return (zpool_standard_error(hdl, errno, msg)); 2746 } 2747 } 2748 2749 /* 2750 * Mark the given vdev faulted. 2751 */ 2752 int 2753 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2754 { 2755 zfs_cmd_t zc = { 0 }; 2756 char msg[1024]; 2757 libzfs_handle_t *hdl = zhp->zpool_hdl; 2758 2759 (void) snprintf(msg, sizeof (msg), 2760 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2761 2762 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2763 zc.zc_guid = guid; 2764 zc.zc_cookie = VDEV_STATE_FAULTED; 2765 zc.zc_obj = aux; 2766 2767 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2768 return (0); 2769 2770 switch (errno) { 2771 case EBUSY: 2772 2773 /* 2774 * There are no other replicas of this device. 2775 */ 2776 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2777 2778 default: 2779 return (zpool_standard_error(hdl, errno, msg)); 2780 } 2781 2782 } 2783 2784 /* 2785 * Mark the given vdev degraded. 2786 */ 2787 int 2788 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2789 { 2790 zfs_cmd_t zc = { 0 }; 2791 char msg[1024]; 2792 libzfs_handle_t *hdl = zhp->zpool_hdl; 2793 2794 (void) snprintf(msg, sizeof (msg), 2795 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2796 2797 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2798 zc.zc_guid = guid; 2799 zc.zc_cookie = VDEV_STATE_DEGRADED; 2800 zc.zc_obj = aux; 2801 2802 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2803 return (0); 2804 2805 return (zpool_standard_error(hdl, errno, msg)); 2806 } 2807 2808 /* 2809 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2810 * a hot spare. 2811 */ 2812 static boolean_t 2813 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2814 { 2815 nvlist_t **child; 2816 uint_t c, children; 2817 char *type; 2818 2819 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2820 &children) == 0) { 2821 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2822 &type) == 0); 2823 2824 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2825 children == 2 && child[which] == tgt) 2826 return (B_TRUE); 2827 2828 for (c = 0; c < children; c++) 2829 if (is_replacing_spare(child[c], tgt, which)) 2830 return (B_TRUE); 2831 } 2832 2833 return (B_FALSE); 2834 } 2835 2836 /* 2837 * Attach new_disk (fully described by nvroot) to old_disk. 2838 * If 'replacing' is specified, the new disk will replace the old one. 
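 *
 * Illustrative call shapes only (the device names are placeholders and
 * the construction of nvroot is elided): passing a non-zero 'replacing',
 * as 'zpool replace' does,
 *
 *	(void) zpool_vdev_attach(zhp, "c0t0d0s0", "c0t1d0s0", nvroot, 1);
 *
 * swaps the new disk in for the old one, while passing 0, as
 * 'zpool attach' does, adds the new disk as a mirror of the old one.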
2839 */ 2840 int 2841 zpool_vdev_attach(zpool_handle_t *zhp, 2842 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2843 { 2844 zfs_cmd_t zc = { 0 }; 2845 char msg[1024]; 2846 int ret; 2847 nvlist_t *tgt; 2848 boolean_t avail_spare, l2cache, islog; 2849 uint64_t val; 2850 char *newname; 2851 nvlist_t **child; 2852 uint_t children; 2853 nvlist_t *config_root; 2854 libzfs_handle_t *hdl = zhp->zpool_hdl; 2855 boolean_t rootpool = zpool_is_bootable(zhp); 2856 2857 if (replacing) 2858 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2859 "cannot replace %s with %s"), old_disk, new_disk); 2860 else 2861 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2862 "cannot attach %s to %s"), new_disk, old_disk); 2863 2864 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2865 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2866 &islog)) == NULL) 2867 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2868 2869 if (avail_spare) 2870 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2871 2872 if (l2cache) 2873 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2874 2875 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2876 zc.zc_cookie = replacing; 2877 2878 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2879 &child, &children) != 0 || children != 1) { 2880 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2881 "new device must be a single disk")); 2882 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2883 } 2884 2885 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2886 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2887 2888 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2889 return (-1); 2890 2891 /* 2892 * If the target is a hot spare that has been swapped in, we can only 2893 * replace it with another hot spare. 2894 */ 2895 if (replacing && 2896 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2897 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2898 NULL) == NULL || !avail_spare) && 2899 is_replacing_spare(config_root, tgt, 1)) { 2900 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2901 "can only be replaced by another hot spare")); 2902 free(newname); 2903 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2904 } 2905 2906 free(newname); 2907 2908 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2909 return (-1); 2910 2911 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2912 2913 zcmd_free_nvlists(&zc); 2914 2915 if (ret == 0) { 2916 if (rootpool) { 2917 /* 2918 * XXX need a better way to prevent user from 2919 * booting up a half-baked vdev. 2920 */ 2921 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2922 "sure to wait until resilver is done " 2923 "before rebooting.\n")); 2924 } 2925 return (0); 2926 } 2927 2928 switch (errno) { 2929 case ENOTSUP: 2930 /* 2931 * Can't attach to or replace this type of vdev. 
2932 */ 2933 if (replacing) { 2934 uint64_t version = zpool_get_prop_int(zhp, 2935 ZPOOL_PROP_VERSION, NULL); 2936 2937 if (islog) 2938 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2939 "cannot replace a log with a spare")); 2940 else if (version >= SPA_VERSION_MULTI_REPLACE) 2941 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2942 "already in replacing/spare config; wait " 2943 "for completion or use 'zpool detach'")); 2944 else 2945 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2946 "cannot replace a replacing device")); 2947 } else { 2948 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2949 "can only attach to mirrors and top-level " 2950 "disks")); 2951 } 2952 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2953 break; 2954 2955 case EINVAL: 2956 /* 2957 * The new device must be a single disk. 2958 */ 2959 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2960 "new device must be a single disk")); 2961 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2962 break; 2963 2964 case EBUSY: 2965 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, " 2966 "or device removal is in progress"), 2967 new_disk); 2968 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2969 break; 2970 2971 case EOVERFLOW: 2972 /* 2973 * The new device is too small. 2974 */ 2975 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2976 "device is too small")); 2977 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2978 break; 2979 2980 case EDOM: 2981 /* 2982 * The new device has a different alignment requirement. 2983 */ 2984 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2985 "devices have different sector alignment")); 2986 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2987 break; 2988 2989 case ENAMETOOLONG: 2990 /* 2991 * The resulting top-level vdev spec won't fit in the label. 2992 */ 2993 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2994 break; 2995 2996 default: 2997 (void) zpool_standard_error(hdl, errno, msg); 2998 } 2999 3000 return (-1); 3001 } 3002 3003 /* 3004 * Detach the specified device. 3005 */ 3006 int 3007 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 3008 { 3009 zfs_cmd_t zc = { 0 }; 3010 char msg[1024]; 3011 nvlist_t *tgt; 3012 boolean_t avail_spare, l2cache; 3013 libzfs_handle_t *hdl = zhp->zpool_hdl; 3014 3015 (void) snprintf(msg, sizeof (msg), 3016 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 3017 3018 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3019 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3020 NULL)) == NULL) 3021 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3022 3023 if (avail_spare) 3024 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3025 3026 if (l2cache) 3027 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 3028 3029 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3030 3031 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 3032 return (0); 3033 3034 switch (errno) { 3035 3036 case ENOTSUP: 3037 /* 3038 * Can't detach from this type of vdev. 3039 */ 3040 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 3041 "applicable to mirror and replacing vdevs")); 3042 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 3043 break; 3044 3045 case EBUSY: 3046 /* 3047 * There are no other replicas of this device. 3048 */ 3049 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 3050 break; 3051 3052 default: 3053 (void) zpool_standard_error(hdl, errno, msg); 3054 } 3055 3056 return (-1); 3057 } 3058 3059 /* 3060 * Find a mirror vdev in the source nvlist. 3061 * 3062 * The mchild array contains a list of disks in one of the top-level mirrors 3063 * of the source pool. 
The schild array contains a list of disks that the 3064 * user specified on the command line. We loop over the mchild array to 3065 * see if any entry in the schild array matches. 3066 * 3067 * If a disk in the mchild array is found in the schild array, we return 3068 * the index of that entry. Otherwise we return -1. 3069 */ 3070 static int 3071 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 3072 nvlist_t **schild, uint_t schildren) 3073 { 3074 uint_t mc; 3075 3076 for (mc = 0; mc < mchildren; mc++) { 3077 uint_t sc; 3078 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 3079 mchild[mc], B_FALSE); 3080 3081 for (sc = 0; sc < schildren; sc++) { 3082 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 3083 schild[sc], B_FALSE); 3084 boolean_t result = (strcmp(mpath, spath) == 0); 3085 3086 free(spath); 3087 if (result) { 3088 free(mpath); 3089 return (mc); 3090 } 3091 } 3092 3093 free(mpath); 3094 } 3095 3096 return (-1); 3097 } 3098 3099 /* 3100 * Split a mirror pool. If newroot points to null, then a new nvlist 3101 * is generated and it is the responsibility of the caller to free it. 3102 */ 3103 int 3104 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 3105 nvlist_t *props, splitflags_t flags) 3106 { 3107 zfs_cmd_t zc = { 0 }; 3108 char msg[1024]; 3109 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 3110 nvlist_t **varray = NULL, *zc_props = NULL; 3111 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 3112 libzfs_handle_t *hdl = zhp->zpool_hdl; 3113 uint64_t vers; 3114 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 3115 int retval = 0; 3116 3117 (void) snprintf(msg, sizeof (msg), 3118 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 3119 3120 if (!zpool_name_valid(hdl, B_FALSE, newname)) 3121 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 3122 3123 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 3124 (void) fprintf(stderr, gettext("Internal error: unable to " 3125 "retrieve pool configuration\n")); 3126 return (-1); 3127 } 3128 3129 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 3130 == 0); 3131 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 3132 3133 if (props) { 3134 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 3135 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 3136 props, vers, flags, msg)) == NULL) 3137 return (-1); 3138 } 3139 3140 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 3141 &children) != 0) { 3142 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3143 "Source pool is missing vdev tree")); 3144 nvlist_free(zc_props); 3145 return (-1); 3146 } 3147 3148 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 3149 vcount = 0; 3150 3151 if (*newroot == NULL || 3152 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 3153 &newchild, &newchildren) != 0) 3154 newchildren = 0; 3155 3156 for (c = 0; c < children; c++) { 3157 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 3158 char *type; 3159 nvlist_t **mchild, *vdev; 3160 uint_t mchildren; 3161 int entry; 3162 3163 /* 3164 * Unlike cache & spares, slogs are stored in the 3165 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 3166 */ 3167 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 3168 &is_log); 3169 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 3170 &is_hole); 3171 if (is_log || is_hole) { 3172 /* 3173 * Create a hole vdev and put it in the config. 
3174 */ 3175 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 3176 goto out; 3177 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 3178 VDEV_TYPE_HOLE) != 0) 3179 goto out; 3180 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 3181 1) != 0) 3182 goto out; 3183 if (lastlog == 0) 3184 lastlog = vcount; 3185 varray[vcount++] = vdev; 3186 continue; 3187 } 3188 lastlog = 0; 3189 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 3190 == 0); 3191 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 3192 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3193 "Source pool must be composed only of mirrors\n")); 3194 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3195 goto out; 3196 } 3197 3198 verify(nvlist_lookup_nvlist_array(child[c], 3199 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 3200 3201 /* find or add an entry for this top-level vdev */ 3202 if (newchildren > 0 && 3203 (entry = find_vdev_entry(zhp, mchild, mchildren, 3204 newchild, newchildren)) >= 0) { 3205 /* We found a disk that the user specified. */ 3206 vdev = mchild[entry]; 3207 ++found; 3208 } else { 3209 /* User didn't specify a disk for this vdev. */ 3210 vdev = mchild[mchildren - 1]; 3211 } 3212 3213 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 3214 goto out; 3215 } 3216 3217 /* did we find every disk the user specified? */ 3218 if (found != newchildren) { 3219 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 3220 "include at most one disk from each mirror")); 3221 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3222 goto out; 3223 } 3224 3225 /* Prepare the nvlist for populating. */ 3226 if (*newroot == NULL) { 3227 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 3228 goto out; 3229 freelist = B_TRUE; 3230 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 3231 VDEV_TYPE_ROOT) != 0) 3232 goto out; 3233 } else { 3234 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 3235 } 3236 3237 /* Add all the children we found */ 3238 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 3239 lastlog == 0 ? vcount : lastlog) != 0) 3240 goto out; 3241 3242 /* 3243 * If we're just doing a dry run, exit now with success. 3244 */ 3245 if (flags.dryrun) { 3246 memory_err = B_FALSE; 3247 freelist = B_FALSE; 3248 goto out; 3249 } 3250 3251 /* now build up the config list & call the ioctl */ 3252 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3253 goto out; 3254 3255 if (nvlist_add_nvlist(newconfig, 3256 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3257 nvlist_add_string(newconfig, 3258 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3259 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3260 goto out; 3261 3262 /* 3263 * The new pool is automatically part of the namespace unless we 3264 * explicitly export it. 
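 * (When the caller did not ask for an import, ZPOOL_EXPORT_AFTER_SPLIT
 * below tells the kernel to export the new pool as part of the split.)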
3265 */ 3266 if (!flags.import) 3267 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3268 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3269 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3270 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3271 goto out; 3272 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3273 goto out; 3274 3275 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3276 retval = zpool_standard_error(hdl, errno, msg); 3277 goto out; 3278 } 3279 3280 freelist = B_FALSE; 3281 memory_err = B_FALSE; 3282 3283 out: 3284 if (varray != NULL) { 3285 int v; 3286 3287 for (v = 0; v < vcount; v++) 3288 nvlist_free(varray[v]); 3289 free(varray); 3290 } 3291 zcmd_free_nvlists(&zc); 3292 nvlist_free(zc_props); 3293 nvlist_free(newconfig); 3294 if (freelist) { 3295 nvlist_free(*newroot); 3296 *newroot = NULL; 3297 } 3298 3299 if (retval != 0) 3300 return (retval); 3301 3302 if (memory_err) 3303 return (no_memory(hdl)); 3304 3305 return (0); 3306 } 3307 3308 /* 3309 * Remove the given device. 3310 */ 3311 int 3312 zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3313 { 3314 zfs_cmd_t zc = { 0 }; 3315 char msg[1024]; 3316 nvlist_t *tgt; 3317 boolean_t avail_spare, l2cache, islog; 3318 libzfs_handle_t *hdl = zhp->zpool_hdl; 3319 uint64_t version; 3320 3321 (void) snprintf(msg, sizeof (msg), 3322 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3323 3324 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3325 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3326 &islog)) == NULL) 3327 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3328 3329 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3330 if (islog && version < SPA_VERSION_HOLES) { 3331 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3332 "pool must be upgraded to support log removal")); 3333 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3334 } 3335 3336 if (!islog && !avail_spare && !l2cache && zpool_is_bootable(zhp)) { 3337 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3338 "root pool can not have removed devices, " 3339 "because GRUB does not understand them")); 3340 return (zfs_error(hdl, EINVAL, msg)); 3341 } 3342 3343 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 3344 3345 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3346 return (0); 3347 3348 switch (errno) { 3349 3350 case EINVAL: 3351 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3352 "invalid config; all top-level vdevs must " 3353 "have the same sector size and not be raidz.")); 3354 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 3355 break; 3356 3357 case EBUSY: 3358 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3359 "Pool busy; removal may already be in progress")); 3360 (void) zfs_error(hdl, EZFS_BUSY, msg); 3361 break; 3362 3363 default: 3364 (void) zpool_standard_error(hdl, errno, msg); 3365 } 3366 return (-1); 3367 } 3368 3369 int 3370 zpool_vdev_remove_cancel(zpool_handle_t *zhp) 3371 { 3372 zfs_cmd_t zc = { 0 }; 3373 char msg[1024]; 3374 libzfs_handle_t *hdl = zhp->zpool_hdl; 3375 3376 (void) snprintf(msg, sizeof (msg), 3377 dgettext(TEXT_DOMAIN, "cannot cancel removal")); 3378 3379 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3380 zc.zc_cookie = 1; 3381 3382 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3383 return (0); 3384 3385 return (zpool_standard_error(hdl, errno, msg)); 3386 } 3387 3388 int 3389 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path, 3390 uint64_t *sizep) 3391 { 3392 char msg[1024]; 3393 nvlist_t *tgt; 3394 
boolean_t avail_spare, l2cache, islog; 3395 libzfs_handle_t *hdl = zhp->zpool_hdl; 3396 3397 (void) snprintf(msg, sizeof (msg), 3398 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"), 3399 path); 3400 3401 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3402 &islog)) == NULL) 3403 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3404 3405 if (avail_spare || l2cache || islog) { 3406 *sizep = 0; 3407 return (0); 3408 } 3409 3410 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) { 3411 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3412 "indirect size not available")); 3413 return (zfs_error(hdl, EINVAL, msg)); 3414 } 3415 return (0); 3416 } 3417 3418 /* 3419 * Clear the errors for the pool, or the particular device if specified. 3420 */ 3421 int 3422 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3423 { 3424 zfs_cmd_t zc = { 0 }; 3425 char msg[1024]; 3426 nvlist_t *tgt; 3427 zpool_load_policy_t policy; 3428 boolean_t avail_spare, l2cache; 3429 libzfs_handle_t *hdl = zhp->zpool_hdl; 3430 nvlist_t *nvi = NULL; 3431 int error; 3432 3433 if (path) 3434 (void) snprintf(msg, sizeof (msg), 3435 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3436 path); 3437 else 3438 (void) snprintf(msg, sizeof (msg), 3439 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3440 zhp->zpool_name); 3441 3442 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3443 if (path) { 3444 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3445 &l2cache, NULL)) == NULL) 3446 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3447 3448 /* 3449 * Don't allow error clearing for hot spares. Do allow 3450 * error clearing for l2cache devices. 3451 */ 3452 if (avail_spare) 3453 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3454 3455 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3456 &zc.zc_guid) == 0); 3457 } 3458 3459 zpool_get_load_policy(rewindnvl, &policy); 3460 zc.zc_cookie = policy.zlp_rewind; 3461 3462 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3463 return (-1); 3464 3465 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 3466 return (-1); 3467 3468 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3469 errno == ENOMEM) { 3470 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3471 zcmd_free_nvlists(&zc); 3472 return (-1); 3473 } 3474 } 3475 3476 if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) && 3477 errno != EPERM && errno != EACCES)) { 3478 if (policy.zlp_rewind & 3479 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3480 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3481 zpool_rewind_exclaim(hdl, zc.zc_name, 3482 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), 3483 nvi); 3484 nvlist_free(nvi); 3485 } 3486 zcmd_free_nvlists(&zc); 3487 return (0); 3488 } 3489 3490 zcmd_free_nvlists(&zc); 3491 return (zpool_standard_error(hdl, errno, msg)); 3492 } 3493 3494 /* 3495 * Similar to zpool_clear(), but takes a GUID (used by fmd). 
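 * The guid would typically come from the vdev's nvlist, e.g.
 * fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID), or from the fault
 * event being handled.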
3496 */ 3497 int 3498 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3499 { 3500 zfs_cmd_t zc = { 0 }; 3501 char msg[1024]; 3502 libzfs_handle_t *hdl = zhp->zpool_hdl; 3503 3504 (void) snprintf(msg, sizeof (msg), 3505 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3506 guid); 3507 3508 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3509 zc.zc_guid = guid; 3510 zc.zc_cookie = ZPOOL_NO_REWIND; 3511 3512 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3513 return (0); 3514 3515 return (zpool_standard_error(hdl, errno, msg)); 3516 } 3517 3518 /* 3519 * Change the GUID for a pool. 3520 */ 3521 int 3522 zpool_reguid(zpool_handle_t *zhp) 3523 { 3524 char msg[1024]; 3525 libzfs_handle_t *hdl = zhp->zpool_hdl; 3526 zfs_cmd_t zc = { 0 }; 3527 3528 (void) snprintf(msg, sizeof (msg), 3529 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3530 3531 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3532 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3533 return (0); 3534 3535 return (zpool_standard_error(hdl, errno, msg)); 3536 } 3537 3538 /* 3539 * Reopen the pool. 3540 */ 3541 int 3542 zpool_reopen(zpool_handle_t *zhp) 3543 { 3544 zfs_cmd_t zc = { 0 }; 3545 char msg[1024]; 3546 libzfs_handle_t *hdl = zhp->zpool_hdl; 3547 3548 (void) snprintf(msg, sizeof (msg), 3549 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3550 zhp->zpool_name); 3551 3552 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3553 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3554 return (0); 3555 return (zpool_standard_error(hdl, errno, msg)); 3556 } 3557 3558 /* call into libzfs_core to execute the sync IOCTL per pool */ 3559 int 3560 zpool_sync_one(zpool_handle_t *zhp, void *data) 3561 { 3562 int ret; 3563 libzfs_handle_t *hdl = zpool_get_handle(zhp); 3564 const char *pool_name = zpool_get_name(zhp); 3565 boolean_t *force = data; 3566 nvlist_t *innvl = fnvlist_alloc(); 3567 3568 fnvlist_add_boolean_value(innvl, "force", *force); 3569 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) { 3570 nvlist_free(innvl); 3571 return (zpool_standard_error_fmt(hdl, ret, 3572 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name)); 3573 } 3574 nvlist_free(innvl); 3575 3576 return (0); 3577 } 3578 3579 /* 3580 * Convert from a devid string to a path. 3581 */ 3582 static char * 3583 devid_to_path(char *devid_str) 3584 { 3585 ddi_devid_t devid; 3586 char *minor; 3587 char *path; 3588 devid_nmlist_t *list = NULL; 3589 int ret; 3590 3591 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3592 return (NULL); 3593 3594 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3595 3596 devid_str_free(minor); 3597 devid_free(devid); 3598 3599 if (ret != 0) 3600 return (NULL); 3601 3602 /* 3603 * In a case the strdup() fails, we will just return NULL below. 3604 */ 3605 path = strdup(list[0].devname); 3606 3607 devid_free_nmlist(list); 3608 3609 return (path); 3610 } 3611 3612 /* 3613 * Convert from a path to a devid string. 
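 * (The inverse of devid_to_path() above; returns NULL if the device
 * cannot be opened or has no devid.)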
3614 */ 3615 static char * 3616 path_to_devid(const char *path) 3617 { 3618 int fd; 3619 ddi_devid_t devid; 3620 char *minor, *ret; 3621 3622 if ((fd = open(path, O_RDONLY)) < 0) 3623 return (NULL); 3624 3625 minor = NULL; 3626 ret = NULL; 3627 if (devid_get(fd, &devid) == 0) { 3628 if (devid_get_minor_name(fd, &minor) == 0) 3629 ret = devid_str_encode(devid, minor); 3630 if (minor != NULL) 3631 devid_str_free(minor); 3632 devid_free(devid); 3633 } 3634 (void) close(fd); 3635 3636 return (ret); 3637 } 3638 3639 /* 3640 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3641 * ignore any failure here, since a common case is for an unprivileged user to 3642 * type 'zpool status', and we'll display the correct information anyway. 3643 */ 3644 static void 3645 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3646 { 3647 zfs_cmd_t zc = { 0 }; 3648 3649 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3650 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3651 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3652 &zc.zc_guid) == 0); 3653 3654 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3655 } 3656 3657 /* 3658 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3659 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3660 * We also check if this is a whole disk, in which case we strip off the 3661 * trailing 's0' slice name. 3662 * 3663 * This routine is also responsible for identifying when disks have been 3664 * reconfigured in a new location. The kernel will have opened the device by 3665 * devid, but the path will still refer to the old location. To catch this, we 3666 * first do a path -> devid translation (which is fast for the common case). If 3667 * the devid matches, we're done. If not, we do a reverse devid -> path 3668 * translation and issue the appropriate ioctl() to update the path of the vdev. 3669 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3670 * of these checks. 3671 */ 3672 char * 3673 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3674 boolean_t verbose) 3675 { 3676 char *path, *devid; 3677 uint64_t value; 3678 char buf[64]; 3679 vdev_stat_t *vs; 3680 uint_t vsc; 3681 3682 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 3683 &value) == 0) { 3684 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3685 &value) == 0); 3686 (void) snprintf(buf, sizeof (buf), "%llu", 3687 (u_longlong_t)value); 3688 path = buf; 3689 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 3690 3691 /* 3692 * If the device is dead (faulted, offline, etc) then don't 3693 * bother opening it. Otherwise we may be forcing the user to 3694 * open a misbehaving device, which can have undesirable 3695 * effects. 3696 */ 3697 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3698 (uint64_t **)&vs, &vsc) != 0 || 3699 vs->vs_state >= VDEV_STATE_DEGRADED) && 3700 zhp != NULL && 3701 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3702 /* 3703 * Determine if the current path is correct. 3704 */ 3705 char *newdevid = path_to_devid(path); 3706 3707 if (newdevid == NULL || 3708 strcmp(devid, newdevid) != 0) { 3709 char *newpath; 3710 3711 if ((newpath = devid_to_path(devid)) != NULL) { 3712 /* 3713 * Update the path appropriately. 
3714 */ 3715 set_path(zhp, nv, newpath); 3716 if (nvlist_add_string(nv, 3717 ZPOOL_CONFIG_PATH, newpath) == 0) 3718 verify(nvlist_lookup_string(nv, 3719 ZPOOL_CONFIG_PATH, 3720 &path) == 0); 3721 free(newpath); 3722 } 3723 } 3724 3725 if (newdevid) 3726 devid_str_free(newdevid); 3727 } 3728 3729 if (strncmp(path, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0) 3730 path += strlen(ZFS_DISK_ROOTD); 3731 3732 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3733 &value) == 0 && value) { 3734 int pathlen = strlen(path); 3735 char *tmp = zfs_strdup(hdl, path); 3736 3737 /* 3738 * If it starts with c#, and ends with "s0" or "s1", 3739 * chop the slice off, or if it ends with "s0/old" or 3740 * "s1/old", remove the slice from the middle. 3741 */ 3742 if (CTD_CHECK(tmp)) { 3743 if (strcmp(&tmp[pathlen - 2], "s0") == 0 || 3744 strcmp(&tmp[pathlen - 2], "s1") == 0) { 3745 tmp[pathlen - 2] = '\0'; 3746 } else if (pathlen > 6 && 3747 (strcmp(&tmp[pathlen - 6], "s0/old") == 0 || 3748 strcmp(&tmp[pathlen - 6], "s1/old") == 0)) { 3749 (void) strcpy(&tmp[pathlen - 6], 3750 "/old"); 3751 } 3752 } 3753 return (tmp); 3754 } 3755 } else { 3756 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3757 3758 /* 3759 * If it's a raidz device, we need to stick in the parity level. 3760 */ 3761 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3762 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3763 &value) == 0); 3764 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3765 (u_longlong_t)value); 3766 path = buf; 3767 } 3768 3769 /* 3770 * We identify each top-level vdev by using a <type-id> 3771 * naming convention. 3772 */ 3773 if (verbose) { 3774 uint64_t id; 3775 3776 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3777 &id) == 0); 3778 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3779 (u_longlong_t)id); 3780 path = buf; 3781 } 3782 } 3783 3784 return (zfs_strdup(hdl, path)); 3785 } 3786 3787 static int 3788 zbookmark_mem_compare(const void *a, const void *b) 3789 { 3790 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 3791 } 3792 3793 /* 3794 * Retrieve the persistent error log, uniquify the members, and return to the 3795 * caller. 3796 */ 3797 int 3798 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3799 { 3800 zfs_cmd_t zc = { 0 }; 3801 uint64_t count; 3802 zbookmark_phys_t *zb = NULL; 3803 int i; 3804 3805 /* 3806 * Retrieve the raw error list from the kernel. If the number of errors 3807 * has increased, allocate more space and continue until we get the 3808 * entire list. 3809 */ 3810 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3811 &count) == 0); 3812 if (count == 0) 3813 return (0); 3814 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3815 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL) 3816 return (-1); 3817 zc.zc_nvlist_dst_size = count; 3818 (void) strcpy(zc.zc_name, zhp->zpool_name); 3819 for (;;) { 3820 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3821 &zc) != 0) { 3822 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3823 if (errno == ENOMEM) { 3824 void *dst; 3825 3826 count = zc.zc_nvlist_dst_size; 3827 dst = zfs_alloc(zhp->zpool_hdl, count * 3828 sizeof (zbookmark_phys_t)); 3829 if (dst == NULL) 3830 return (-1); 3831 zc.zc_nvlist_dst = (uintptr_t)dst; 3832 } else { 3833 return (-1); 3834 } 3835 } else { 3836 break; 3837 } 3838 } 3839 3840 /* 3841 * Sort the resulting bookmarks. This is a little confusing due to the 3842 * implementation of ZFS_IOC_ERROR_LOG. 
The bookmarks are copied last 3843 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks 3844 * _not_ copied as part of the process. So we point the start of our 3845 * array appropriately and decrement the total number of elements. 3846 */ 3847 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) + 3848 zc.zc_nvlist_dst_size; 3849 count -= zc.zc_nvlist_dst_size; 3850 3851 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare); 3852 3853 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 3854 3855 /* 3856 * Fill in the nverrlistp with nvlists of dataset and object numbers. 3857 */ 3858 for (i = 0; i < count; i++) { 3859 nvlist_t *nv; 3860 3861 /* ignoring zb_blkid and zb_level for now */ 3862 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3863 zb[i-1].zb_object == zb[i].zb_object) 3864 continue; 3865 3866 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3867 goto nomem; 3868 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3869 zb[i].zb_objset) != 0) { 3870 nvlist_free(nv); 3871 goto nomem; 3872 } 3873 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3874 zb[i].zb_object) != 0) { 3875 nvlist_free(nv); 3876 goto nomem; 3877 } 3878 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3879 nvlist_free(nv); 3880 goto nomem; 3881 } 3882 nvlist_free(nv); 3883 } 3884 3885 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3886 return (0); 3887 3888 nomem: 3889 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3890 return (no_memory(zhp->zpool_hdl)); 3891 } 3892 3893 /* 3894 * Upgrade a ZFS pool to the latest on-disk version. 3895 */ 3896 int 3897 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3898 { 3899 zfs_cmd_t zc = { 0 }; 3900 libzfs_handle_t *hdl = zhp->zpool_hdl; 3901 3902 (void) strcpy(zc.zc_name, zhp->zpool_name); 3903 zc.zc_cookie = new_version; 3904 3905 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3906 return (zpool_standard_error_fmt(hdl, errno, 3907 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3908 zhp->zpool_name)); 3909 return (0); 3910 } 3911 3912 void 3913 zfs_save_arguments(int argc, char **argv, char *string, int len) 3914 { 3915 (void) strlcpy(string, basename(argv[0]), len); 3916 for (int i = 1; i < argc; i++) { 3917 (void) strlcat(string, " ", len); 3918 (void) strlcat(string, argv[i], len); 3919 } 3920 } 3921 3922 int 3923 zpool_log_history(libzfs_handle_t *hdl, const char *message) 3924 { 3925 zfs_cmd_t zc = { 0 }; 3926 nvlist_t *args; 3927 int err; 3928 3929 args = fnvlist_alloc(); 3930 fnvlist_add_string(args, "message", message); 3931 err = zcmd_write_src_nvlist(hdl, &zc, args); 3932 if (err == 0) 3933 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc); 3934 nvlist_free(args); 3935 zcmd_free_nvlists(&zc); 3936 return (err); 3937 } 3938 3939 /* 3940 * Perform an ioctl to get some command history of a pool. 3941 * 3942 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 3943 * logical offset of the history buffer to start reading from. 3944 * 3945 * Upon return, 'off' is the next logical offset to read from and 3946 * 'len' is the actual number of bytes read into 'buf'.
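 *
 * A minimal read-loop sketch (error handling elided; zpool_get_history()
 * below is the real consumer and also resizes the buffer as needed):
 *
 *	char buf[128 * 1024];
 *	uint64_t off = 0, len;
 *
 *	do {
 *		len = sizeof (buf);
 *		if (get_history(zhp, buf, &off, &len) != 0)
 *			break;
 *	} while (len != 0);
 *
 * with the 'len' bytes of packed records in 'buf' handed to
 * zpool_history_unpack() on each iteration.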
3947 */ 3948 static int 3949 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3950 { 3951 zfs_cmd_t zc = { 0 }; 3952 libzfs_handle_t *hdl = zhp->zpool_hdl; 3953 3954 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3955 3956 zc.zc_history = (uint64_t)(uintptr_t)buf; 3957 zc.zc_history_len = *len; 3958 zc.zc_history_offset = *off; 3959 3960 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3961 switch (errno) { 3962 case EPERM: 3963 return (zfs_error_fmt(hdl, EZFS_PERM, 3964 dgettext(TEXT_DOMAIN, 3965 "cannot show history for pool '%s'"), 3966 zhp->zpool_name)); 3967 case ENOENT: 3968 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3969 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3970 "'%s'"), zhp->zpool_name)); 3971 case ENOTSUP: 3972 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3973 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3974 "'%s', pool must be upgraded"), zhp->zpool_name)); 3975 default: 3976 return (zpool_standard_error_fmt(hdl, errno, 3977 dgettext(TEXT_DOMAIN, 3978 "cannot get history for '%s'"), zhp->zpool_name)); 3979 } 3980 } 3981 3982 *len = zc.zc_history_len; 3983 *off = zc.zc_history_offset; 3984 3985 return (0); 3986 } 3987 3988 /* 3989 * Process the buffer of nvlists, unpacking and storing each nvlist record 3990 * into 'records'. 'leftover' is set to the number of bytes that weren't 3991 * processed as there wasn't a complete record. 3992 */ 3993 int 3994 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3995 nvlist_t ***records, uint_t *numrecords) 3996 { 3997 uint64_t reclen; 3998 nvlist_t *nv; 3999 int i; 4000 4001 while (bytes_read > sizeof (reclen)) { 4002 4003 /* get length of packed record (stored as little endian) */ 4004 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 4005 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 4006 4007 if (bytes_read < sizeof (reclen) + reclen) 4008 break; 4009 4010 /* unpack record */ 4011 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 4012 return (ENOMEM); 4013 bytes_read -= sizeof (reclen) + reclen; 4014 buf += sizeof (reclen) + reclen; 4015 4016 /* add record to nvlist array */ 4017 (*numrecords)++; 4018 if (ISP2(*numrecords + 1)) { 4019 *records = realloc(*records, 4020 *numrecords * 2 * sizeof (nvlist_t *)); 4021 } 4022 (*records)[*numrecords - 1] = nv; 4023 } 4024 4025 *leftover = bytes_read; 4026 return (0); 4027 } 4028 4029 /* 4030 * Retrieve the command history of a pool. 4031 */ 4032 int 4033 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 4034 { 4035 char *buf; 4036 int buflen = 128 * 1024; 4037 uint64_t off = 0; 4038 nvlist_t **records = NULL; 4039 uint_t numrecords = 0; 4040 int err, i; 4041 4042 buf = malloc(buflen); 4043 if (buf == NULL) 4044 return (ENOMEM); 4045 do { 4046 uint64_t bytes_read = buflen; 4047 uint64_t leftover; 4048 4049 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 4050 break; 4051 4052 /* if nothing else was read in, we're at EOF, just return */ 4053 if (!bytes_read) 4054 break; 4055 4056 if ((err = zpool_history_unpack(buf, bytes_read, 4057 &leftover, &records, &numrecords)) != 0) 4058 break; 4059 off -= leftover; 4060 if (leftover == bytes_read) { 4061 /* 4062 * no progress made, because buffer is not big enough 4063 * to hold this record; resize and retry. 
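 * (For example, a single record larger than the initial 128K buffer
 * doubles 'buflen' until that record fits.)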
4064 */ 4065 buflen *= 2; 4066 free(buf); 4067 buf = malloc(buflen); 4068 if (buf == NULL) 4069 return (ENOMEM); 4070 } 4071 4072 /* CONSTCOND */ 4073 } while (1); 4074 4075 free(buf); 4076 4077 if (!err) { 4078 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 4079 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 4080 records, numrecords) == 0); 4081 } 4082 for (i = 0; i < numrecords; i++) 4083 nvlist_free(records[i]); 4084 free(records); 4085 4086 return (err); 4087 } 4088 4089 void 4090 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 4091 char *pathname, size_t len) 4092 { 4093 zfs_cmd_t zc = { 0 }; 4094 boolean_t mounted = B_FALSE; 4095 char *mntpnt = NULL; 4096 char dsname[ZFS_MAX_DATASET_NAME_LEN]; 4097 4098 if (dsobj == 0) { 4099 /* special case for the MOS */ 4100 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 4101 return; 4102 } 4103 4104 /* get the dataset's name */ 4105 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4106 zc.zc_obj = dsobj; 4107 if (ioctl(zhp->zpool_hdl->libzfs_fd, 4108 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 4109 /* just write out a path of two object numbers */ 4110 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 4111 dsobj, obj); 4112 return; 4113 } 4114 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 4115 4116 /* find out if the dataset is mounted */ 4117 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 4118 4119 /* get the corrupted object's path */ 4120 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 4121 zc.zc_obj = obj; 4122 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 4123 &zc) == 0) { 4124 if (mounted) { 4125 (void) snprintf(pathname, len, "%s%s", mntpnt, 4126 zc.zc_value); 4127 } else { 4128 (void) snprintf(pathname, len, "%s:%s", 4129 dsname, zc.zc_value); 4130 } 4131 } else { 4132 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 4133 } 4134 free(mntpnt); 4135 } 4136 4137 /* 4138 * Read the EFI label from the config, if a label does not exist then 4139 * pass back the error to the caller. If the caller has passed a non-NULL 4140 * diskaddr argument then we set it to the starting address of the EFI 4141 * partition. If the caller has passed a non-NULL boolean argument, then 4142 * we set it to indicate if the disk does have efi system partition. 
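 *
 * A minimal caller sketch ('config' is assumed to be a leaf vdev nvlist
 * carrying a ZPOOL_CONFIG_PATH entry; see find_start_block() below for
 * an in-tree caller):
 *
 *	diskaddr_t start = MAXOFFSET_T;
 *	boolean_t has_sys = B_FALSE;
 *
 *	if (read_efi_label(config, &start, &has_sys) < 0)
 *		start = MAXOFFSET_T;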
/*
 * Read the EFI label from the config; if a label does not exist then
 * pass the error back to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.  If the caller has passed a non-NULL boolean argument, then
 * we set it to indicate whether the disk has an EFI system partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb, boolean_t *system)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	boolean_t boot = B_FALSE;
	int err = -1;
	int slice;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", ZFS_RDISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			for (slice = 0; slice < vtoc->efi_nparts; slice++) {
				if (vtoc->efi_parts[slice].p_tag == V_SYSTEM)
					boot = B_TRUE;
				if (vtoc->efi_parts[slice].p_tag == V_USR)
					break;
			}
			/* guard against running off the end when no V_USR slice exists */
			if (sb != NULL && slice < vtoc->efi_nparts &&
			    vtoc->efi_parts[slice].p_tag == V_USR)
				*sb = vtoc->efi_parts[slice].p_start;
			if (system != NULL)
				*system = boot;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * Determine where a partition starts on a disk in the current
 * configuration.
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb, NULL) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}
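
/*
 * Summary of the two EFI label layouts written by zpool_label_disk() below
 * (slice numbers, tags, and names taken from the code; sizes are in disk
 * blocks):
 *
 *	Default layout (start_block == NEW_START_BLOCK):
 *		slice 0  V_USR "zfs"        start_block .. last usable - resv
 *		slice 8  V_RESERVED         EFI_MIN_RESV_SIZE blocks at the end
 *
 *	Layout with an EFI System partition (start_block != NEW_START_BLOCK,
 *	e.g. when boot_type == ZPOOL_CREATE_BOOT_LABEL):
 *		slice 0  V_SYSTEM "loader"  NEW_START_BLOCK .. start_block - 1
 *		slice 1  V_USR "zfs"        start_block .. last usable - resv
 *		slice 8  V_RESERVED         EFI_MIN_RESV_SIZE blocks at the end
 */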
/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name,
    zpool_boot_label_t boot_type, uint64_t boot_size, int *slice)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s%s", ZFS_RDISK_ROOT, name,
	    BACKUP_SLICE);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity.
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	/* first fix the partition start block */
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;

	/*
	 * The EFI System partition uses slice 0; ZFS is on slice 1 and
	 * slice 8 is reserved.  We assume that a GPT partition table without
	 * a system partition has the zfs p_start == NEW_START_BLOCK.  If
	 * start_block != NEW_START_BLOCK, we have a system partition.  The
	 * correct solution would be to query/cache the vtoc from an existing
	 * vdev member.
	 */
	if (boot_type == ZPOOL_CREATE_BOOT_LABEL) {
		if (boot_size % vtoc->efi_lbasize != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "boot partition size must be a multiple of %d"),
			    vtoc->efi_lbasize);
			(void) close(fd);
			efi_free(vtoc);
			return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
		}
		/*
		 * System partition size checks.
		 * Note that 1MB is a fairly arbitrary value: since we are
		 * creating a dedicated pool, it should be enough to hold the
		 * FAT file system and the EFI boot loader.  It may need to be
		 * adjusted if the boot loader grows.
		 */
		if (boot_size < 1024 * 1024) {
			char buf[64];
			zfs_nicenum(boot_size, buf, sizeof (buf));
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Specified size %s for EFI System partition is "
			    "too small; the minimum size is 1MB."), buf);
			(void) close(fd);
			efi_free(vtoc);
			return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
		}
		/* 33MB is tested with mkfs -F pcfs */
		if (hdl->libzfs_printerr &&
		    ((vtoc->efi_lbasize == 512 &&
		    boot_size < 33 * 1024 * 1024) ||
		    (vtoc->efi_lbasize == 4096 &&
		    boot_size < 256 * 1024 * 1024))) {
			char buf[64];
			zfs_nicenum(boot_size, buf, sizeof (buf));
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
			    "Warning: EFI System partition size %s is too "
			    "small to hold a FAT32 file\nsystem, which may "
			    "result in an unbootable system.\n"), buf);
		}
		/* Adjust zfs partition start by size of system partition. */
		start_block += boot_size / vtoc->efi_lbasize;
	}

	if (start_block == NEW_START_BLOCK) {
		/*
		 * Use default layout.
		 * ZFS is on slice 0 and slice 8 is reserved.
		 */
		slice_size = vtoc->efi_last_u_lba + 1;
		slice_size -= EFI_MIN_RESV_SIZE;
		slice_size -= start_block;
		if (slice != NULL)
			*slice = 0;

		vtoc->efi_parts[0].p_start = start_block;
		vtoc->efi_parts[0].p_size = slice_size;

		vtoc->efi_parts[0].p_tag = V_USR;
		(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

		vtoc->efi_parts[8].p_start = slice_size + start_block;
		vtoc->efi_parts[8].p_size = resv;
		vtoc->efi_parts[8].p_tag = V_RESERVED;
	} else {
		slice_size = start_block - NEW_START_BLOCK;
		vtoc->efi_parts[0].p_start = NEW_START_BLOCK;
		vtoc->efi_parts[0].p_size = slice_size;
		vtoc->efi_parts[0].p_tag = V_SYSTEM;
		(void) strcpy(vtoc->efi_parts[0].p_name, "loader");
		if (slice != NULL)
			*slice = 1;
		/* prepare slice 1 */
		slice_size = vtoc->efi_last_u_lba + 1 - slice_size;
		slice_size -= resv;
		slice_size -= NEW_START_BLOCK;
		vtoc->efi_parts[1].p_start = start_block;
		vtoc->efi_parts[1].p_size = slice_size;
		vtoc->efi_parts[1].p_tag = V_USR;
		(void) strcpy(vtoc->efi_parts[1].p_name, "zfs");

		vtoc->efi_parts[8].p_start = slice_size + start_block;
		vtoc->efi_parts[8].p_size = resv;
		vtoc->efi_parts[8].p_tag = V_RESERVED;
	}

	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and give a specific
		 * slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
	return (0);
}

static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	char *type;
	nvlist_t **child;
	uint_t children, c;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "vdev type '%s' is not supported"), type);
		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
		return (B_FALSE);
	}
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}
	return (B_TRUE);
}
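
/*
 * supported_dump_vdev_type() above walks the given vdev tree recursively: a
 * configuration is acceptable only if no vdev in the subtree is of type
 * "file", "hole", or "missing".  Mirror and raidz interior vdevs are not
 * rejected here; their children are checked individually.  Note that
 * zvol_check_dump_config() below applies this check only to the first
 * top-level vdev (top[0]) of the pool.
 */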
/*
 * Check if this zvol is allowable for use as a dump device; zero if
 * it is, > 0 if it isn't, < 0 if it isn't a zvol.
 *
 * Allowable storage configurations include mirrors, all raidz variants, and
 * pools with log, cache, and spare devices.  Pools which are backed by files
 * or have missing/hole vdevs are not suitable.
 */
int
zvol_check_dump_config(char *arg)
{
	zpool_handle_t *zhp = NULL;
	nvlist_t *config, *nvroot;
	char *p, *volname;
	nvlist_t **top;
	uint_t toplevels;
	libzfs_handle_t *hdl;
	char errbuf[1024];
	char poolname[ZFS_MAX_DATASET_NAME_LEN];
	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
	int ret = 1;

	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
		return (-1);
	}

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "dump is not supported on device '%s'"), arg);

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	libzfs_print_on_error(hdl, B_TRUE);

	volname = arg + pathlen;

	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		goto out;
	} else if (p - volname >= ZFS_MAX_DATASET_NAME_LEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;
	} else {
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}

	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not open pool '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
		goto out;
	}
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);

	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
		goto out;
	}
	ret = 0;

out:
	if (zhp)
		zpool_close(zhp);
	libzfs_fini(hdl);
	return (ret);
}
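
/*
 * Illustrative consumer of zpool_get_history() (a sketch, not part of this
 * library): the returned nvlist carries the records as an nvlist array under
 * ZPOOL_HIST_RECORD, so a caller with an open pool handle 'zhp' would
 * typically do something like
 *
 *	nvlist_t *nvhis;
 *	nvlist_t **records;
 *	uint_t nrecords, i;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &nrecords) == 0);
 *		for (i = 0; i < nrecords; i++) {
 *			... inspect records[i] ...
 *		}
 *		nvlist_free(nvhis);
 *	}
 *
 * The record nvlists are owned by 'nvhis' and are released by the single
 * nvlist_free() call.
 */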