1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2015 by Delphix. All rights reserved. 25 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 26 * Copyright 2016 Nexenta Systems, Inc. 27 */ 28 29 #include <ctype.h> 30 #include <errno.h> 31 #include <devid.h> 32 #include <fcntl.h> 33 #include <libintl.h> 34 #include <stdio.h> 35 #include <stdlib.h> 36 #include <strings.h> 37 #include <unistd.h> 38 #include <libgen.h> 39 #include <sys/efi_partition.h> 40 #include <sys/vtoc.h> 41 #include <sys/zfs_ioctl.h> 42 #include <dlfcn.h> 43 44 #include "zfs_namecheck.h" 45 #include "zfs_prop.h" 46 #include "libzfs_impl.h" 47 #include "zfs_comutil.h" 48 #include "zfeature_common.h" 49 50 static int read_efi_label(nvlist_t *config, diskaddr_t *sb); 51 52 #define BACKUP_SLICE "s2" 53 54 typedef struct prop_flags { 55 int create:1; /* Validate property on creation */ 56 int import:1; /* Validate property on import */ 57 } prop_flags_t; 58 59 /* 60 * ==================================================================== 61 * zpool property functions 62 * ==================================================================== 63 */ 64 65 static int 66 zpool_get_all_props(zpool_handle_t *zhp) 67 { 68 zfs_cmd_t zc = { 0 }; 69 libzfs_handle_t *hdl = zhp->zpool_hdl; 70 71 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 72 73 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 74 return (-1); 75 76 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 77 if (errno == ENOMEM) { 78 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 79 zcmd_free_nvlists(&zc); 80 return (-1); 81 } 82 } else { 83 zcmd_free_nvlists(&zc); 84 return (-1); 85 } 86 } 87 88 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 89 zcmd_free_nvlists(&zc); 90 return (-1); 91 } 92 93 zcmd_free_nvlists(&zc); 94 95 return (0); 96 } 97 98 static int 99 zpool_props_refresh(zpool_handle_t *zhp) 100 { 101 nvlist_t *old_props; 102 103 old_props = zhp->zpool_props; 104 105 if (zpool_get_all_props(zhp) != 0) 106 return (-1); 107 108 nvlist_free(old_props); 109 return (0); 110 } 111 112 static char * 113 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 114 zprop_source_t *src) 115 { 116 nvlist_t *nv, *nvl; 117 uint64_t ival; 118 char *value; 119 zprop_source_t source; 120 121 nvl = zhp->zpool_props; 122 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 123 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 124 source = ival; 125 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 126 } else { 127 source = ZPROP_SRC_DEFAULT; 128 if ((value = (char *)zpool_prop_default_string(prop)) 
== NULL) 129 value = "-"; 130 } 131 132 if (src) 133 *src = source; 134 135 return (value); 136 } 137 138 uint64_t 139 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 140 { 141 nvlist_t *nv, *nvl; 142 uint64_t value; 143 zprop_source_t source; 144 145 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 146 /* 147 * zpool_get_all_props() has most likely failed because 148 * the pool is faulted, but if all we need is the top level 149 * vdev's guid then get it from the zhp config nvlist. 150 */ 151 if ((prop == ZPOOL_PROP_GUID) && 152 (nvlist_lookup_nvlist(zhp->zpool_config, 153 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 154 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 155 == 0)) { 156 return (value); 157 } 158 return (zpool_prop_default_numeric(prop)); 159 } 160 161 nvl = zhp->zpool_props; 162 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 163 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 164 source = value; 165 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 166 } else { 167 source = ZPROP_SRC_DEFAULT; 168 value = zpool_prop_default_numeric(prop); 169 } 170 171 if (src) 172 *src = source; 173 174 return (value); 175 } 176 177 /* 178 * Map VDEV STATE to printed strings. 179 */ 180 const char * 181 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 182 { 183 switch (state) { 184 case VDEV_STATE_CLOSED: 185 case VDEV_STATE_OFFLINE: 186 return (gettext("OFFLINE")); 187 case VDEV_STATE_REMOVED: 188 return (gettext("REMOVED")); 189 case VDEV_STATE_CANT_OPEN: 190 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 191 return (gettext("FAULTED")); 192 else if (aux == VDEV_AUX_SPLIT_POOL) 193 return (gettext("SPLIT")); 194 else 195 return (gettext("UNAVAIL")); 196 case VDEV_STATE_FAULTED: 197 return (gettext("FAULTED")); 198 case VDEV_STATE_DEGRADED: 199 return (gettext("DEGRADED")); 200 case VDEV_STATE_HEALTHY: 201 return (gettext("ONLINE")); 202 } 203 204 return (gettext("UNKNOWN")); 205 } 206 207 /* 208 * Map POOL STATE to printed strings. 209 */ 210 const char * 211 zpool_pool_state_to_name(pool_state_t state) 212 { 213 switch (state) { 214 case POOL_STATE_ACTIVE: 215 return (gettext("ACTIVE")); 216 case POOL_STATE_EXPORTED: 217 return (gettext("EXPORTED")); 218 case POOL_STATE_DESTROYED: 219 return (gettext("DESTROYED")); 220 case POOL_STATE_SPARE: 221 return (gettext("SPARE")); 222 case POOL_STATE_L2CACHE: 223 return (gettext("L2CACHE")); 224 case POOL_STATE_UNINITIALIZED: 225 return (gettext("UNINITIALIZED")); 226 case POOL_STATE_UNAVAIL: 227 return (gettext("UNAVAIL")); 228 case POOL_STATE_POTENTIALLY_ACTIVE: 229 return (gettext("POTENTIALLY_ACTIVE")); 230 } 231 232 return (gettext("UNKNOWN")); 233 } 234 235 /* 236 * Get a zpool property value for 'prop' and return the value in 237 * a pre-allocated buffer. 
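 * Returns 0 on success, -1 on failure. When 'literal' is B_TRUE, numeric
 * values such as sizes are printed exactly rather than in nicenum form.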
238 */ 239 int 240 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 241 zprop_source_t *srctype, boolean_t literal) 242 { 243 uint64_t intval; 244 const char *strval; 245 zprop_source_t src = ZPROP_SRC_NONE; 246 nvlist_t *nvroot; 247 vdev_stat_t *vs; 248 uint_t vsc; 249 250 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 251 switch (prop) { 252 case ZPOOL_PROP_NAME: 253 (void) strlcpy(buf, zpool_get_name(zhp), len); 254 break; 255 256 case ZPOOL_PROP_HEALTH: 257 (void) strlcpy(buf, "FAULTED", len); 258 break; 259 260 case ZPOOL_PROP_GUID: 261 intval = zpool_get_prop_int(zhp, prop, &src); 262 (void) snprintf(buf, len, "%llu", intval); 263 break; 264 265 case ZPOOL_PROP_ALTROOT: 266 case ZPOOL_PROP_CACHEFILE: 267 case ZPOOL_PROP_COMMENT: 268 if (zhp->zpool_props != NULL || 269 zpool_get_all_props(zhp) == 0) { 270 (void) strlcpy(buf, 271 zpool_get_prop_string(zhp, prop, &src), 272 len); 273 break; 274 } 275 /* FALLTHROUGH */ 276 default: 277 (void) strlcpy(buf, "-", len); 278 break; 279 } 280 281 if (srctype != NULL) 282 *srctype = src; 283 return (0); 284 } 285 286 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 287 prop != ZPOOL_PROP_NAME) 288 return (-1); 289 290 switch (zpool_prop_get_type(prop)) { 291 case PROP_TYPE_STRING: 292 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 293 len); 294 break; 295 296 case PROP_TYPE_NUMBER: 297 intval = zpool_get_prop_int(zhp, prop, &src); 298 299 switch (prop) { 300 case ZPOOL_PROP_SIZE: 301 case ZPOOL_PROP_ALLOCATED: 302 case ZPOOL_PROP_FREE: 303 case ZPOOL_PROP_FREEING: 304 case ZPOOL_PROP_LEAKED: 305 if (literal) { 306 (void) snprintf(buf, len, "%llu", 307 (u_longlong_t)intval); 308 } else { 309 (void) zfs_nicenum(intval, buf, len); 310 } 311 break; 312 case ZPOOL_PROP_EXPANDSZ: 313 if (intval == 0) { 314 (void) strlcpy(buf, "-", len); 315 } else if (literal) { 316 (void) snprintf(buf, len, "%llu", 317 (u_longlong_t)intval); 318 } else { 319 (void) zfs_nicenum(intval, buf, len); 320 } 321 break; 322 case ZPOOL_PROP_CAPACITY: 323 if (literal) { 324 (void) snprintf(buf, len, "%llu", 325 (u_longlong_t)intval); 326 } else { 327 (void) snprintf(buf, len, "%llu%%", 328 (u_longlong_t)intval); 329 } 330 break; 331 case ZPOOL_PROP_FRAGMENTATION: 332 if (intval == UINT64_MAX) { 333 (void) strlcpy(buf, "-", len); 334 } else { 335 (void) snprintf(buf, len, "%llu%%", 336 (u_longlong_t)intval); 337 } 338 break; 339 case ZPOOL_PROP_DEDUPRATIO: 340 (void) snprintf(buf, len, "%llu.%02llux", 341 (u_longlong_t)(intval / 100), 342 (u_longlong_t)(intval % 100)); 343 break; 344 case ZPOOL_PROP_HEALTH: 345 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 346 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 347 verify(nvlist_lookup_uint64_array(nvroot, 348 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 349 == 0); 350 351 (void) strlcpy(buf, zpool_state_to_name(intval, 352 vs->vs_aux), len); 353 break; 354 case ZPOOL_PROP_VERSION: 355 if (intval >= SPA_VERSION_FEATURES) { 356 (void) snprintf(buf, len, "-"); 357 break; 358 } 359 /* FALLTHROUGH */ 360 default: 361 (void) snprintf(buf, len, "%llu", intval); 362 } 363 break; 364 365 case PROP_TYPE_INDEX: 366 intval = zpool_get_prop_int(zhp, prop, &src); 367 if (zpool_prop_index_to_string(prop, intval, &strval) 368 != 0) 369 return (-1); 370 (void) strlcpy(buf, strval, len); 371 break; 372 373 default: 374 abort(); 375 } 376 377 if (srctype) 378 *srctype = src; 379 380 return (0); 381 } 382 383 /* 384 * Check if the bootfs name has the same pool name as it is set to. 
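 * That is, the bootfs dataset must reside in the pool the property is set on.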
385 * Assuming bootfs is a valid dataset name. 386 */ 387 static boolean_t 388 bootfs_name_valid(const char *pool, char *bootfs) 389 { 390 int len = strlen(pool); 391 392 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 393 return (B_FALSE); 394 395 if (strncmp(pool, bootfs, len) == 0 && 396 (bootfs[len] == '/' || bootfs[len] == '\0')) 397 return (B_TRUE); 398 399 return (B_FALSE); 400 } 401 402 boolean_t 403 zpool_is_bootable(zpool_handle_t *zhp) 404 { 405 char bootfs[ZPOOL_MAXNAMELEN]; 406 407 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 408 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-", 409 sizeof (bootfs)) != 0); 410 } 411 412 413 /* 414 * Given an nvlist of zpool properties to be set, validate that they are 415 * correct, and parse any numeric properties (index, boolean, etc) if they are 416 * specified as strings. 417 */ 418 static nvlist_t * 419 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 420 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 421 { 422 nvpair_t *elem; 423 nvlist_t *retprops; 424 zpool_prop_t prop; 425 char *strval; 426 uint64_t intval; 427 char *slash, *check; 428 struct stat64 statbuf; 429 zpool_handle_t *zhp; 430 431 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 432 (void) no_memory(hdl); 433 return (NULL); 434 } 435 436 elem = NULL; 437 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 438 const char *propname = nvpair_name(elem); 439 440 prop = zpool_name_to_prop(propname); 441 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) { 442 int err; 443 char *fname = strchr(propname, '@') + 1; 444 445 err = zfeature_lookup_name(fname, NULL); 446 if (err != 0) { 447 ASSERT3U(err, ==, ENOENT); 448 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 449 "invalid feature '%s'"), fname); 450 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 451 goto error; 452 } 453 454 if (nvpair_type(elem) != DATA_TYPE_STRING) { 455 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 456 "'%s' must be a string"), propname); 457 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 458 goto error; 459 } 460 461 (void) nvpair_value_string(elem, &strval); 462 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) { 463 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 464 "property '%s' can only be set to " 465 "'enabled'"), propname); 466 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 467 goto error; 468 } 469 470 if (nvlist_add_uint64(retprops, propname, 0) != 0) { 471 (void) no_memory(hdl); 472 goto error; 473 } 474 continue; 475 } 476 477 /* 478 * Make sure this property is valid and applies to this type. 479 */ 480 if (prop == ZPROP_INVAL) { 481 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 482 "invalid property '%s'"), propname); 483 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 484 goto error; 485 } 486 487 if (zpool_prop_readonly(prop)) { 488 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 489 "is readonly"), propname); 490 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 491 goto error; 492 } 493 494 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 495 &strval, &intval, errbuf) != 0) 496 goto error; 497 498 /* 499 * Perform additional checking for specific properties. 
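 * (version, bootfs, altroot, cachefile, comment, readonly)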
500 */ 501 switch (prop) { 502 case ZPOOL_PROP_VERSION: 503 if (intval < version || 504 !SPA_VERSION_IS_SUPPORTED(intval)) { 505 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 506 "property '%s' number %d is invalid."), 507 propname, intval); 508 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 509 goto error; 510 } 511 break; 512 513 case ZPOOL_PROP_BOOTFS: 514 if (flags.create || flags.import) { 515 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 516 "property '%s' cannot be set at creation " 517 "or import time"), propname); 518 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 519 goto error; 520 } 521 522 if (version < SPA_VERSION_BOOTFS) { 523 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 524 "pool must be upgraded to support " 525 "'%s' property"), propname); 526 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 527 goto error; 528 } 529 530 /* 531 * bootfs property value has to be a dataset name and 532 * the dataset has to be in the same pool as it sets to. 533 */ 534 if (strval[0] != '\0' && !bootfs_name_valid(poolname, 535 strval)) { 536 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 537 "is an invalid name"), strval); 538 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 539 goto error; 540 } 541 542 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 543 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 544 "could not open pool '%s'"), poolname); 545 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 546 goto error; 547 } 548 zpool_close(zhp); 549 break; 550 551 case ZPOOL_PROP_ALTROOT: 552 if (!flags.create && !flags.import) { 553 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 554 "property '%s' can only be set during pool " 555 "creation or import"), propname); 556 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 557 goto error; 558 } 559 560 if (strval[0] != '/') { 561 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 562 "bad alternate root '%s'"), strval); 563 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 564 goto error; 565 } 566 break; 567 568 case ZPOOL_PROP_CACHEFILE: 569 if (strval[0] == '\0') 570 break; 571 572 if (strcmp(strval, "none") == 0) 573 break; 574 575 if (strval[0] != '/') { 576 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 577 "property '%s' must be empty, an " 578 "absolute path, or 'none'"), propname); 579 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 580 goto error; 581 } 582 583 slash = strrchr(strval, '/'); 584 585 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 586 strcmp(slash, "/..") == 0) { 587 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 588 "'%s' is not a valid file"), strval); 589 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 590 goto error; 591 } 592 593 *slash = '\0'; 594 595 if (strval[0] != '\0' && 596 (stat64(strval, &statbuf) != 0 || 597 !S_ISDIR(statbuf.st_mode))) { 598 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 599 "'%s' is not a valid directory"), 600 strval); 601 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 602 goto error; 603 } 604 605 *slash = '/'; 606 break; 607 608 case ZPOOL_PROP_COMMENT: 609 for (check = strval; *check != '\0'; check++) { 610 if (!isprint(*check)) { 611 zfs_error_aux(hdl, 612 dgettext(TEXT_DOMAIN, 613 "comment may only have printable " 614 "characters")); 615 (void) zfs_error(hdl, EZFS_BADPROP, 616 errbuf); 617 goto error; 618 } 619 } 620 if (strlen(strval) > ZPROP_MAX_COMMENT) { 621 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 622 "comment must not exceed %d characters"), 623 ZPROP_MAX_COMMENT); 624 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 625 goto error; 626 } 627 break; 628 case ZPOOL_PROP_READONLY: 629 if (!flags.import) { 630 zfs_error_aux(hdl, 
dgettext(TEXT_DOMAIN, 631 "property '%s' can only be set at " 632 "import time"), propname); 633 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 634 goto error; 635 } 636 break; 637 } 638 } 639 640 return (retprops); 641 error: 642 nvlist_free(retprops); 643 return (NULL); 644 } 645 646 /* 647 * Set zpool property : propname=propval. 648 */ 649 int 650 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 651 { 652 zfs_cmd_t zc = { 0 }; 653 int ret = -1; 654 char errbuf[1024]; 655 nvlist_t *nvl = NULL; 656 nvlist_t *realprops; 657 uint64_t version; 658 prop_flags_t flags = { 0 }; 659 660 (void) snprintf(errbuf, sizeof (errbuf), 661 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 662 zhp->zpool_name); 663 664 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 665 return (no_memory(zhp->zpool_hdl)); 666 667 if (nvlist_add_string(nvl, propname, propval) != 0) { 668 nvlist_free(nvl); 669 return (no_memory(zhp->zpool_hdl)); 670 } 671 672 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 673 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 674 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 675 nvlist_free(nvl); 676 return (-1); 677 } 678 679 nvlist_free(nvl); 680 nvl = realprops; 681 682 /* 683 * Execute the corresponding ioctl() to set this property. 684 */ 685 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 686 687 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 688 nvlist_free(nvl); 689 return (-1); 690 } 691 692 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 693 694 zcmd_free_nvlists(&zc); 695 nvlist_free(nvl); 696 697 if (ret) 698 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 699 else 700 (void) zpool_props_refresh(zhp); 701 702 return (ret); 703 } 704 705 int 706 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 707 { 708 libzfs_handle_t *hdl = zhp->zpool_hdl; 709 zprop_list_t *entry; 710 char buf[ZFS_MAXPROPLEN]; 711 nvlist_t *features = NULL; 712 zprop_list_t **last; 713 boolean_t firstexpand = (NULL == *plp); 714 715 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 716 return (-1); 717 718 last = plp; 719 while (*last != NULL) 720 last = &(*last)->pl_next; 721 722 if ((*plp)->pl_all) 723 features = zpool_get_features(zhp); 724 725 if ((*plp)->pl_all && firstexpand) { 726 for (int i = 0; i < SPA_FEATURES; i++) { 727 zprop_list_t *entry = zfs_alloc(hdl, 728 sizeof (zprop_list_t)); 729 entry->pl_prop = ZPROP_INVAL; 730 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s", 731 spa_feature_table[i].fi_uname); 732 entry->pl_width = strlen(entry->pl_user_prop); 733 entry->pl_all = B_TRUE; 734 735 *last = entry; 736 last = &entry->pl_next; 737 } 738 } 739 740 /* add any unsupported features */ 741 for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL); 742 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) { 743 char *propname; 744 boolean_t found; 745 zprop_list_t *entry; 746 747 if (zfeature_is_supported(nvpair_name(nvp))) 748 continue; 749 750 propname = zfs_asprintf(hdl, "unsupported@%s", 751 nvpair_name(nvp)); 752 753 /* 754 * Before adding the property to the list make sure that no 755 * other pool already added the same property. 
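 * (the property list may be shared while iterating over multiple pools)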
756 */ 757 found = B_FALSE; 758 entry = *plp; 759 while (entry != NULL) { 760 if (entry->pl_user_prop != NULL && 761 strcmp(propname, entry->pl_user_prop) == 0) { 762 found = B_TRUE; 763 break; 764 } 765 entry = entry->pl_next; 766 } 767 if (found) { 768 free(propname); 769 continue; 770 } 771 772 entry = zfs_alloc(hdl, sizeof (zprop_list_t)); 773 entry->pl_prop = ZPROP_INVAL; 774 entry->pl_user_prop = propname; 775 entry->pl_width = strlen(entry->pl_user_prop); 776 entry->pl_all = B_TRUE; 777 778 *last = entry; 779 last = &entry->pl_next; 780 } 781 782 for (entry = *plp; entry != NULL; entry = entry->pl_next) { 783 784 if (entry->pl_fixed) 785 continue; 786 787 if (entry->pl_prop != ZPROP_INVAL && 788 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), 789 NULL, B_FALSE) == 0) { 790 if (strlen(buf) > entry->pl_width) 791 entry->pl_width = strlen(buf); 792 } 793 } 794 795 return (0); 796 } 797 798 /* 799 * Get the state for the given feature on the given ZFS pool. 800 */ 801 int 802 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf, 803 size_t len) 804 { 805 uint64_t refcount; 806 boolean_t found = B_FALSE; 807 nvlist_t *features = zpool_get_features(zhp); 808 boolean_t supported; 809 const char *feature = strchr(propname, '@') + 1; 810 811 supported = zpool_prop_feature(propname); 812 ASSERT(supported || zfs_prop_unsupported(propname)); 813 814 /* 815 * Convert from feature name to feature guid. This conversion is 816 * unecessary for unsupported@... properties because they already 817 * use guids. 818 */ 819 if (supported) { 820 int ret; 821 spa_feature_t fid; 822 823 ret = zfeature_lookup_name(feature, &fid); 824 if (ret != 0) { 825 (void) strlcpy(buf, "-", len); 826 return (ENOTSUP); 827 } 828 feature = spa_feature_table[fid].fi_guid; 829 } 830 831 if (nvlist_lookup_uint64(features, feature, &refcount) == 0) 832 found = B_TRUE; 833 834 if (supported) { 835 if (!found) { 836 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len); 837 } else { 838 if (refcount == 0) 839 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len); 840 else 841 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len); 842 } 843 } else { 844 if (found) { 845 if (refcount == 0) { 846 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE); 847 } else { 848 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY); 849 } 850 } else { 851 (void) strlcpy(buf, "-", len); 852 return (ENOTSUP); 853 } 854 } 855 856 return (0); 857 } 858 859 /* 860 * Don't start the slice at the default block of 34; many storage 861 * devices will use a stripe width of 128k, so start there instead. 862 */ 863 #define NEW_START_BLOCK 256 864 865 /* 866 * Validate the given pool name, optionally putting an extended error message in 867 * 'buf'. 868 */ 869 boolean_t 870 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 871 { 872 namecheck_err_t why; 873 char what; 874 int ret; 875 876 ret = pool_namecheck(pool, &why, &what); 877 878 /* 879 * The rules for reserved pool names were extended at a later point. 880 * But we need to support users with existing pools that may now be 881 * invalid. So we only check for this expanded set of names during a 882 * create (or import), and only in userland. 
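 * The expanded set covers names beginning with "mirror", "raidz", or
 * "spare", as well as the literal name "log".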
883 */ 884 if (ret == 0 && !isopen && 885 (strncmp(pool, "mirror", 6) == 0 || 886 strncmp(pool, "raidz", 5) == 0 || 887 strncmp(pool, "spare", 5) == 0 || 888 strcmp(pool, "log") == 0)) { 889 if (hdl != NULL) 890 zfs_error_aux(hdl, 891 dgettext(TEXT_DOMAIN, "name is reserved")); 892 return (B_FALSE); 893 } 894 895 896 if (ret != 0) { 897 if (hdl != NULL) { 898 switch (why) { 899 case NAME_ERR_TOOLONG: 900 zfs_error_aux(hdl, 901 dgettext(TEXT_DOMAIN, "name is too long")); 902 break; 903 904 case NAME_ERR_INVALCHAR: 905 zfs_error_aux(hdl, 906 dgettext(TEXT_DOMAIN, "invalid character " 907 "'%c' in pool name"), what); 908 break; 909 910 case NAME_ERR_NOLETTER: 911 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 912 "name must begin with a letter")); 913 break; 914 915 case NAME_ERR_RESERVED: 916 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 917 "name is reserved")); 918 break; 919 920 case NAME_ERR_DISKLIKE: 921 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 922 "pool name is reserved")); 923 break; 924 925 case NAME_ERR_LEADING_SLASH: 926 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 927 "leading slash in name")); 928 break; 929 930 case NAME_ERR_EMPTY_COMPONENT: 931 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 932 "empty component in name")); 933 break; 934 935 case NAME_ERR_TRAILING_SLASH: 936 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 937 "trailing slash in name")); 938 break; 939 940 case NAME_ERR_MULTIPLE_AT: 941 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 942 "multiple '@' delimiters in name")); 943 break; 944 945 } 946 } 947 return (B_FALSE); 948 } 949 950 return (B_TRUE); 951 } 952 953 /* 954 * Open a handle to the given pool, even if the pool is currently in the FAULTED 955 * state. 956 */ 957 zpool_handle_t * 958 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 959 { 960 zpool_handle_t *zhp; 961 boolean_t missing; 962 963 /* 964 * Make sure the pool name is valid. 965 */ 966 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 967 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 968 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 969 pool); 970 return (NULL); 971 } 972 973 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 974 return (NULL); 975 976 zhp->zpool_hdl = hdl; 977 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 978 979 if (zpool_refresh_stats(zhp, &missing) != 0) { 980 zpool_close(zhp); 981 return (NULL); 982 } 983 984 if (missing) { 985 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 986 (void) zfs_error_fmt(hdl, EZFS_NOENT, 987 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 988 zpool_close(zhp); 989 return (NULL); 990 } 991 992 return (zhp); 993 } 994 995 /* 996 * Like the above, but silent on error. Used when iterating over pools (because 997 * the configuration cache may be out of date). 998 */ 999 int 1000 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 1001 { 1002 zpool_handle_t *zhp; 1003 boolean_t missing; 1004 1005 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1006 return (-1); 1007 1008 zhp->zpool_hdl = hdl; 1009 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1010 1011 if (zpool_refresh_stats(zhp, &missing) != 0) { 1012 zpool_close(zhp); 1013 return (-1); 1014 } 1015 1016 if (missing) { 1017 zpool_close(zhp); 1018 *ret = NULL; 1019 return (0); 1020 } 1021 1022 *ret = zhp; 1023 return (0); 1024 } 1025 1026 /* 1027 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 1028 * state. 
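 * Returns NULL (reporting EZFS_POOLUNAVAIL) if the pool state is UNAVAIL.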
1029 */ 1030 zpool_handle_t * 1031 zpool_open(libzfs_handle_t *hdl, const char *pool) 1032 { 1033 zpool_handle_t *zhp; 1034 1035 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 1036 return (NULL); 1037 1038 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 1039 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 1040 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 1041 zpool_close(zhp); 1042 return (NULL); 1043 } 1044 1045 return (zhp); 1046 } 1047 1048 /* 1049 * Close the handle. Simply frees the memory associated with the handle. 1050 */ 1051 void 1052 zpool_close(zpool_handle_t *zhp) 1053 { 1054 nvlist_free(zhp->zpool_config); 1055 nvlist_free(zhp->zpool_old_config); 1056 nvlist_free(zhp->zpool_props); 1057 free(zhp); 1058 } 1059 1060 /* 1061 * Return the name of the pool. 1062 */ 1063 const char * 1064 zpool_get_name(zpool_handle_t *zhp) 1065 { 1066 return (zhp->zpool_name); 1067 } 1068 1069 1070 /* 1071 * Return the state of the pool (ACTIVE or UNAVAILABLE) 1072 */ 1073 int 1074 zpool_get_state(zpool_handle_t *zhp) 1075 { 1076 return (zhp->zpool_state); 1077 } 1078 1079 /* 1080 * Create the named pool, using the provided vdev list. It is assumed 1081 * that the consumer has already validated the contents of the nvlist, so we 1082 * don't have to worry about error semantics. 1083 */ 1084 int 1085 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 1086 nvlist_t *props, nvlist_t *fsprops) 1087 { 1088 zfs_cmd_t zc = { 0 }; 1089 nvlist_t *zc_fsprops = NULL; 1090 nvlist_t *zc_props = NULL; 1091 char msg[1024]; 1092 int ret = -1; 1093 1094 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1095 "cannot create '%s'"), pool); 1096 1097 if (!zpool_name_valid(hdl, B_FALSE, pool)) 1098 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 1099 1100 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1101 return (-1); 1102 1103 if (props) { 1104 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 1105 1106 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 1107 SPA_VERSION_1, flags, msg)) == NULL) { 1108 goto create_failed; 1109 } 1110 } 1111 1112 if (fsprops) { 1113 uint64_t zoned; 1114 char *zonestr; 1115 1116 zoned = ((nvlist_lookup_string(fsprops, 1117 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 1118 strcmp(zonestr, "on") == 0); 1119 1120 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM, 1121 fsprops, zoned, NULL, NULL, msg)) == NULL) { 1122 goto create_failed; 1123 } 1124 if (!zc_props && 1125 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 1126 goto create_failed; 1127 } 1128 if (nvlist_add_nvlist(zc_props, 1129 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 1130 goto create_failed; 1131 } 1132 } 1133 1134 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 1135 goto create_failed; 1136 1137 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 1138 1139 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 1140 1141 zcmd_free_nvlists(&zc); 1142 nvlist_free(zc_props); 1143 nvlist_free(zc_fsprops); 1144 1145 switch (errno) { 1146 case EBUSY: 1147 /* 1148 * This can happen if the user has specified the same 1149 * device multiple times. We can't reliably detect this 1150 * until we try to add it and see we already have a 1151 * label. 
1152 */ 1153 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1154 "one or more vdevs refer to the same device")); 1155 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1156 1157 case ERANGE: 1158 /* 1159 * This happens if the record size is smaller or larger 1160 * than the allowed size range, or not a power of 2. 1161 * 1162 * NOTE: although zfs_valid_proplist is called earlier, 1163 * this case may have slipped through since the 1164 * pool does not exist yet and it is therefore 1165 * impossible to read properties e.g. max blocksize 1166 * from the pool. 1167 */ 1168 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1169 "record size invalid")); 1170 return (zfs_error(hdl, EZFS_BADPROP, msg)); 1171 1172 case EOVERFLOW: 1173 /* 1174 * This occurs when one of the devices is below 1175 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1176 * device was the problem device since there's no 1177 * reliable way to determine device size from userland. 1178 */ 1179 { 1180 char buf[64]; 1181 1182 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1183 1184 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1185 "one or more devices is less than the " 1186 "minimum size (%s)"), buf); 1187 } 1188 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1189 1190 case ENOSPC: 1191 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1192 "one or more devices is out of space")); 1193 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1194 1195 case ENOTBLK: 1196 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1197 "cache device must be a disk or disk slice")); 1198 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1199 1200 default: 1201 return (zpool_standard_error(hdl, errno, msg)); 1202 } 1203 } 1204 1205 create_failed: 1206 zcmd_free_nvlists(&zc); 1207 nvlist_free(zc_props); 1208 nvlist_free(zc_fsprops); 1209 return (ret); 1210 } 1211 1212 /* 1213 * Destroy the given pool. It is up to the caller to ensure that there are no 1214 * datasets left in the pool. 1215 */ 1216 int 1217 zpool_destroy(zpool_handle_t *zhp, const char *log_str) 1218 { 1219 zfs_cmd_t zc = { 0 }; 1220 zfs_handle_t *zfp = NULL; 1221 libzfs_handle_t *hdl = zhp->zpool_hdl; 1222 char msg[1024]; 1223 1224 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1225 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1226 return (-1); 1227 1228 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1229 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1230 1231 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1232 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1233 "cannot destroy '%s'"), zhp->zpool_name); 1234 1235 if (errno == EROFS) { 1236 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1237 "one or more devices is read only")); 1238 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1239 } else { 1240 (void) zpool_standard_error(hdl, errno, msg); 1241 } 1242 1243 if (zfp) 1244 zfs_close(zfp); 1245 return (-1); 1246 } 1247 1248 if (zfp) { 1249 remove_mountpoint(zfp); 1250 zfs_close(zfp); 1251 } 1252 1253 return (0); 1254 } 1255 1256 /* 1257 * Add the given vdevs to the pool. The caller must have already performed the 1258 * necessary verification to ensure that the vdev specification is well-formed. 
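 * Returns 0 on success, -1 on failure.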
1259 */ 1260 int 1261 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) 1262 { 1263 zfs_cmd_t zc = { 0 }; 1264 int ret; 1265 libzfs_handle_t *hdl = zhp->zpool_hdl; 1266 char msg[1024]; 1267 nvlist_t **spares, **l2cache; 1268 uint_t nspares, nl2cache; 1269 1270 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1271 "cannot add to '%s'"), zhp->zpool_name); 1272 1273 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1274 SPA_VERSION_SPARES && 1275 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1276 &spares, &nspares) == 0) { 1277 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1278 "upgraded to add hot spares")); 1279 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1280 } 1281 1282 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1283 SPA_VERSION_L2CACHE && 1284 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1285 &l2cache, &nl2cache) == 0) { 1286 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1287 "upgraded to add cache devices")); 1288 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1289 } 1290 1291 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1292 return (-1); 1293 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1294 1295 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { 1296 switch (errno) { 1297 case EBUSY: 1298 /* 1299 * This can happen if the user has specified the same 1300 * device multiple times. We can't reliably detect this 1301 * until we try to add it and see we already have a 1302 * label. 1303 */ 1304 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1305 "one or more vdevs refer to the same device")); 1306 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1307 break; 1308 1309 case EOVERFLOW: 1310 /* 1311 * This occurrs when one of the devices is below 1312 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1313 * device was the problem device since there's no 1314 * reliable way to determine device size from userland. 1315 */ 1316 { 1317 char buf[64]; 1318 1319 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1320 1321 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1322 "device is less than the minimum " 1323 "size (%s)"), buf); 1324 } 1325 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1326 break; 1327 1328 case ENOTSUP: 1329 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1330 "pool must be upgraded to add these vdevs")); 1331 (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1332 break; 1333 1334 case EDOM: 1335 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1336 "root pool can not have multiple vdevs" 1337 " or separate logs")); 1338 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); 1339 break; 1340 1341 case ENOTBLK: 1342 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1343 "cache device must be a disk or disk slice")); 1344 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1345 break; 1346 1347 default: 1348 (void) zpool_standard_error(hdl, errno, msg); 1349 } 1350 1351 ret = -1; 1352 } else { 1353 ret = 0; 1354 } 1355 1356 zcmd_free_nvlists(&zc); 1357 1358 return (ret); 1359 } 1360 1361 /* 1362 * Exports the pool from the system. The caller must ensure that there are no 1363 * mounted datasets in the pool. 
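 * The 'log_str' command string is recorded in the pool history.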
1364 */ 1365 static int 1366 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce, 1367 const char *log_str) 1368 { 1369 zfs_cmd_t zc = { 0 }; 1370 char msg[1024]; 1371 1372 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1373 "cannot export '%s'"), zhp->zpool_name); 1374 1375 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1376 zc.zc_cookie = force; 1377 zc.zc_guid = hardforce; 1378 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1379 1380 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1381 switch (errno) { 1382 case EXDEV: 1383 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1384 "use '-f' to override the following errors:\n" 1385 "'%s' has an active shared spare which could be" 1386 " used by other pools once '%s' is exported."), 1387 zhp->zpool_name, zhp->zpool_name); 1388 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1389 msg)); 1390 default: 1391 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1392 msg)); 1393 } 1394 } 1395 1396 return (0); 1397 } 1398 1399 int 1400 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str) 1401 { 1402 return (zpool_export_common(zhp, force, B_FALSE, log_str)); 1403 } 1404 1405 int 1406 zpool_export_force(zpool_handle_t *zhp, const char *log_str) 1407 { 1408 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str)); 1409 } 1410 1411 static void 1412 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, 1413 nvlist_t *config) 1414 { 1415 nvlist_t *nv = NULL; 1416 uint64_t rewindto; 1417 int64_t loss = -1; 1418 struct tm t; 1419 char timestr[128]; 1420 1421 if (!hdl->libzfs_printerr || config == NULL) 1422 return; 1423 1424 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1425 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) { 1426 return; 1427 } 1428 1429 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1430 return; 1431 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1432 1433 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1434 strftime(timestr, 128, 0, &t) != 0) { 1435 if (dryrun) { 1436 (void) printf(dgettext(TEXT_DOMAIN, 1437 "Would be able to return %s " 1438 "to its state as of %s.\n"), 1439 name, timestr); 1440 } else { 1441 (void) printf(dgettext(TEXT_DOMAIN, 1442 "Pool %s returned to its state as of %s.\n"), 1443 name, timestr); 1444 } 1445 if (loss > 120) { 1446 (void) printf(dgettext(TEXT_DOMAIN, 1447 "%s approximately %lld "), 1448 dryrun ? "Would discard" : "Discarded", 1449 (loss + 30) / 60); 1450 (void) printf(dgettext(TEXT_DOMAIN, 1451 "minutes of transactions.\n")); 1452 } else if (loss > 0) { 1453 (void) printf(dgettext(TEXT_DOMAIN, 1454 "%s approximately %lld "), 1455 dryrun ? 
"Would discard" : "Discarded", loss); 1456 (void) printf(dgettext(TEXT_DOMAIN, 1457 "seconds of transactions.\n")); 1458 } 1459 } 1460 } 1461 1462 void 1463 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1464 nvlist_t *config) 1465 { 1466 nvlist_t *nv = NULL; 1467 int64_t loss = -1; 1468 uint64_t edata = UINT64_MAX; 1469 uint64_t rewindto; 1470 struct tm t; 1471 char timestr[128]; 1472 1473 if (!hdl->libzfs_printerr) 1474 return; 1475 1476 if (reason >= 0) 1477 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1478 else 1479 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1480 1481 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1482 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1483 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 || 1484 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1485 goto no_info; 1486 1487 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1488 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1489 &edata); 1490 1491 (void) printf(dgettext(TEXT_DOMAIN, 1492 "Recovery is possible, but will result in some data loss.\n")); 1493 1494 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1495 strftime(timestr, 128, 0, &t) != 0) { 1496 (void) printf(dgettext(TEXT_DOMAIN, 1497 "\tReturning the pool to its state as of %s\n" 1498 "\tshould correct the problem. "), 1499 timestr); 1500 } else { 1501 (void) printf(dgettext(TEXT_DOMAIN, 1502 "\tReverting the pool to an earlier state " 1503 "should correct the problem.\n\t")); 1504 } 1505 1506 if (loss > 120) { 1507 (void) printf(dgettext(TEXT_DOMAIN, 1508 "Approximately %lld minutes of data\n" 1509 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1510 } else if (loss > 0) { 1511 (void) printf(dgettext(TEXT_DOMAIN, 1512 "Approximately %lld seconds of data\n" 1513 "\tmust be discarded, irreversibly. "), loss); 1514 } 1515 if (edata != 0 && edata != UINT64_MAX) { 1516 if (edata == 1) { 1517 (void) printf(dgettext(TEXT_DOMAIN, 1518 "After rewind, at least\n" 1519 "\tone persistent user-data error will remain. ")); 1520 } else { 1521 (void) printf(dgettext(TEXT_DOMAIN, 1522 "After rewind, several\n" 1523 "\tpersistent user-data errors will remain. ")); 1524 } 1525 } 1526 (void) printf(dgettext(TEXT_DOMAIN, 1527 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1528 reason >= 0 ? "clear" : "import", name); 1529 1530 (void) printf(dgettext(TEXT_DOMAIN, 1531 "A scrub of the pool\n" 1532 "\tis strongly recommended after recovery.\n")); 1533 return; 1534 1535 no_info: 1536 (void) printf(dgettext(TEXT_DOMAIN, 1537 "Destroy and re-create the pool from\n\ta backup source.\n")); 1538 } 1539 1540 /* 1541 * zpool_import() is a contracted interface. Should be kept the same 1542 * if possible. 1543 * 1544 * Applications should use zpool_import_props() to import a pool with 1545 * new properties value to be set. 
1546 */ 1547 int 1548 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1549 char *altroot) 1550 { 1551 nvlist_t *props = NULL; 1552 int ret; 1553 1554 if (altroot != NULL) { 1555 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1556 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1557 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1558 newname)); 1559 } 1560 1561 if (nvlist_add_string(props, 1562 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1563 nvlist_add_string(props, 1564 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1565 nvlist_free(props); 1566 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1567 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1568 newname)); 1569 } 1570 } 1571 1572 ret = zpool_import_props(hdl, config, newname, props, 1573 ZFS_IMPORT_NORMAL); 1574 nvlist_free(props); 1575 return (ret); 1576 } 1577 1578 static void 1579 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1580 int indent) 1581 { 1582 nvlist_t **child; 1583 uint_t c, children; 1584 char *vname; 1585 uint64_t is_log = 0; 1586 1587 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1588 &is_log); 1589 1590 if (name != NULL) 1591 (void) printf("\t%*s%s%s\n", indent, "", name, 1592 is_log ? " [log]" : ""); 1593 1594 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1595 &child, &children) != 0) 1596 return; 1597 1598 for (c = 0; c < children; c++) { 1599 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1600 print_vdev_tree(hdl, vname, child[c], indent + 2); 1601 free(vname); 1602 } 1603 } 1604 1605 void 1606 zpool_print_unsup_feat(nvlist_t *config) 1607 { 1608 nvlist_t *nvinfo, *unsup_feat; 1609 1610 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 1611 0); 1612 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT, 1613 &unsup_feat) == 0); 1614 1615 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL; 1616 nvp = nvlist_next_nvpair(unsup_feat, nvp)) { 1617 char *desc; 1618 1619 verify(nvpair_type(nvp) == DATA_TYPE_STRING); 1620 verify(nvpair_value_string(nvp, &desc) == 0); 1621 1622 if (strlen(desc) > 0) 1623 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc); 1624 else 1625 (void) printf("\t%s\n", nvpair_name(nvp)); 1626 } 1627 } 1628 1629 /* 1630 * Import the given pool using the known configuration and a list of 1631 * properties to be set. The configuration should have come from 1632 * zpool_find_import(). The 'newname' parameters control whether the pool 1633 * is imported with a different name. 
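 * Returns 0 on success, -1 on failure.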
1634 */ 1635 int 1636 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1637 nvlist_t *props, int flags) 1638 { 1639 zfs_cmd_t zc = { 0 }; 1640 zpool_rewind_policy_t policy; 1641 nvlist_t *nv = NULL; 1642 nvlist_t *nvinfo = NULL; 1643 nvlist_t *missing = NULL; 1644 char *thename; 1645 char *origname; 1646 int ret; 1647 int error = 0; 1648 char errbuf[1024]; 1649 1650 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1651 &origname) == 0); 1652 1653 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1654 "cannot import pool '%s'"), origname); 1655 1656 if (newname != NULL) { 1657 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1658 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1659 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1660 newname)); 1661 thename = (char *)newname; 1662 } else { 1663 thename = origname; 1664 } 1665 1666 if (props != NULL) { 1667 uint64_t version; 1668 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1669 1670 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1671 &version) == 0); 1672 1673 if ((props = zpool_valid_proplist(hdl, origname, 1674 props, version, flags, errbuf)) == NULL) 1675 return (-1); 1676 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1677 nvlist_free(props); 1678 return (-1); 1679 } 1680 nvlist_free(props); 1681 } 1682 1683 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1684 1685 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1686 &zc.zc_guid) == 0); 1687 1688 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1689 zcmd_free_nvlists(&zc); 1690 return (-1); 1691 } 1692 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1693 zcmd_free_nvlists(&zc); 1694 return (-1); 1695 } 1696 1697 zc.zc_cookie = flags; 1698 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1699 errno == ENOMEM) { 1700 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1701 zcmd_free_nvlists(&zc); 1702 return (-1); 1703 } 1704 } 1705 if (ret != 0) 1706 error = errno; 1707 1708 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1709 1710 zcmd_free_nvlists(&zc); 1711 1712 zpool_get_rewind_policy(config, &policy); 1713 1714 if (error) { 1715 char desc[1024]; 1716 1717 /* 1718 * Dry-run failed, but we print out what success 1719 * looks like if we found a best txg 1720 */ 1721 if (policy.zrp_request & ZPOOL_TRY_REWIND) { 1722 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1723 B_TRUE, nv); 1724 nvlist_free(nv); 1725 return (-1); 1726 } 1727 1728 if (newname == NULL) 1729 (void) snprintf(desc, sizeof (desc), 1730 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1731 thename); 1732 else 1733 (void) snprintf(desc, sizeof (desc), 1734 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1735 origname, thename); 1736 1737 switch (error) { 1738 case ENOTSUP: 1739 if (nv != NULL && nvlist_lookup_nvlist(nv, 1740 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1741 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) { 1742 (void) printf(dgettext(TEXT_DOMAIN, "This " 1743 "pool uses the following feature(s) not " 1744 "supported by this system:\n")); 1745 zpool_print_unsup_feat(nv); 1746 if (nvlist_exists(nvinfo, 1747 ZPOOL_CONFIG_CAN_RDONLY)) { 1748 (void) printf(dgettext(TEXT_DOMAIN, 1749 "All unsupported features are only " 1750 "required for writing to the pool." 1751 "\nThe pool can be imported using " 1752 "'-o readonly=on'.\n")); 1753 } 1754 } 1755 /* 1756 * Unsupported version. 
1757 */ 1758 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1759 break; 1760 1761 case EINVAL: 1762 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1763 break; 1764 1765 case EROFS: 1766 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1767 "one or more devices is read only")); 1768 (void) zfs_error(hdl, EZFS_BADDEV, desc); 1769 break; 1770 1771 case ENXIO: 1772 if (nv && nvlist_lookup_nvlist(nv, 1773 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1774 nvlist_lookup_nvlist(nvinfo, 1775 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1776 (void) printf(dgettext(TEXT_DOMAIN, 1777 "The devices below are missing, use " 1778 "'-m' to import the pool anyway:\n")); 1779 print_vdev_tree(hdl, NULL, missing, 2); 1780 (void) printf("\n"); 1781 } 1782 (void) zpool_standard_error(hdl, error, desc); 1783 break; 1784 1785 case EEXIST: 1786 (void) zpool_standard_error(hdl, error, desc); 1787 break; 1788 1789 default: 1790 (void) zpool_standard_error(hdl, error, desc); 1791 zpool_explain_recover(hdl, 1792 newname ? origname : thename, -error, nv); 1793 break; 1794 } 1795 1796 nvlist_free(nv); 1797 ret = -1; 1798 } else { 1799 zpool_handle_t *zhp; 1800 1801 /* 1802 * This should never fail, but play it safe anyway. 1803 */ 1804 if (zpool_open_silent(hdl, thename, &zhp) != 0) 1805 ret = -1; 1806 else if (zhp != NULL) 1807 zpool_close(zhp); 1808 if (policy.zrp_request & 1809 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 1810 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1811 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv); 1812 } 1813 nvlist_free(nv); 1814 return (0); 1815 } 1816 1817 return (ret); 1818 } 1819 1820 /* 1821 * Scan the pool. 1822 */ 1823 int 1824 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func) 1825 { 1826 zfs_cmd_t zc = { 0 }; 1827 char msg[1024]; 1828 libzfs_handle_t *hdl = zhp->zpool_hdl; 1829 1830 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1831 zc.zc_cookie = func; 1832 1833 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 || 1834 (errno == ENOENT && func != POOL_SCAN_NONE)) 1835 return (0); 1836 1837 if (func == POOL_SCAN_SCRUB) { 1838 (void) snprintf(msg, sizeof (msg), 1839 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name); 1840 } else if (func == POOL_SCAN_NONE) { 1841 (void) snprintf(msg, sizeof (msg), 1842 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 1843 zc.zc_name); 1844 } else { 1845 assert(!"unexpected result"); 1846 } 1847 1848 if (errno == EBUSY) { 1849 nvlist_t *nvroot; 1850 pool_scan_stat_t *ps = NULL; 1851 uint_t psc; 1852 1853 verify(nvlist_lookup_nvlist(zhp->zpool_config, 1854 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1855 (void) nvlist_lookup_uint64_array(nvroot, 1856 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 1857 if (ps && ps->pss_func == POOL_SCAN_SCRUB) 1858 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 1859 else 1860 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 1861 } else if (errno == ENOENT) { 1862 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 1863 } else { 1864 return (zpool_standard_error(hdl, errno, msg)); 1865 } 1866 } 1867 1868 /* 1869 * This provides a very minimal check whether a given string is likely a 1870 * c#t#d# style string. Users of this are expected to do their own 1871 * verification of the s# part. 1872 */ 1873 #define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 1874 1875 /* 1876 * More elaborate version for ones which may start with "/dev/dsk/" 1877 * and the like. 1878 */ 1879 static int 1880 ctd_check_path(char *str) 1881 { 1882 /* 1883 * If it starts with a slash, check the last component. 
1884 */ 1885 if (str && str[0] == '/') { 1886 char *tmp = strrchr(str, '/'); 1887 1888 /* 1889 * If it ends in "/old", check the second-to-last 1890 * component of the string instead. 1891 */ 1892 if (tmp != str && strcmp(tmp, "/old") == 0) { 1893 for (tmp--; *tmp != '/'; tmp--) 1894 ; 1895 } 1896 str = tmp + 1; 1897 } 1898 return (CTD_CHECK(str)); 1899 } 1900 1901 /* 1902 * Find a vdev that matches the search criteria specified. We use the 1903 * the nvpair name to determine how we should look for the device. 1904 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 1905 * spare; but FALSE if its an INUSE spare. 1906 */ 1907 static nvlist_t * 1908 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare, 1909 boolean_t *l2cache, boolean_t *log) 1910 { 1911 uint_t c, children; 1912 nvlist_t **child; 1913 nvlist_t *ret; 1914 uint64_t is_log; 1915 char *srchkey; 1916 nvpair_t *pair = nvlist_next_nvpair(search, NULL); 1917 1918 /* Nothing to look for */ 1919 if (search == NULL || pair == NULL) 1920 return (NULL); 1921 1922 /* Obtain the key we will use to search */ 1923 srchkey = nvpair_name(pair); 1924 1925 switch (nvpair_type(pair)) { 1926 case DATA_TYPE_UINT64: 1927 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) { 1928 uint64_t srchval, theguid; 1929 1930 verify(nvpair_value_uint64(pair, &srchval) == 0); 1931 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 1932 &theguid) == 0); 1933 if (theguid == srchval) 1934 return (nv); 1935 } 1936 break; 1937 1938 case DATA_TYPE_STRING: { 1939 char *srchval, *val; 1940 1941 verify(nvpair_value_string(pair, &srchval) == 0); 1942 if (nvlist_lookup_string(nv, srchkey, &val) != 0) 1943 break; 1944 1945 /* 1946 * Search for the requested value. Special cases: 1947 * 1948 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in 1949 * "s0" or "s0/old". The "s0" part is hidden from the user, 1950 * but included in the string, so this matches around it. 1951 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE). 1952 * 1953 * Otherwise, all other searches are simple string compares. 1954 */ 1955 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && 1956 ctd_check_path(val)) { 1957 uint64_t wholedisk = 0; 1958 1959 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 1960 &wholedisk); 1961 if (wholedisk) { 1962 int slen = strlen(srchval); 1963 int vlen = strlen(val); 1964 1965 if (slen != vlen - 2) 1966 break; 1967 1968 /* 1969 * make_leaf_vdev() should only set 1970 * wholedisk for ZPOOL_CONFIG_PATHs which 1971 * will include "/dev/dsk/", giving plenty of 1972 * room for the indices used next. 1973 */ 1974 ASSERT(vlen >= 6); 1975 1976 /* 1977 * strings identical except trailing "s0" 1978 */ 1979 if (strcmp(&val[vlen - 2], "s0") == 0 && 1980 strncmp(srchval, val, slen) == 0) 1981 return (nv); 1982 1983 /* 1984 * strings identical except trailing "s0/old" 1985 */ 1986 if (strcmp(&val[vlen - 6], "s0/old") == 0 && 1987 strcmp(&srchval[slen - 4], "/old") == 0 && 1988 strncmp(srchval, val, slen - 4) == 0) 1989 return (nv); 1990 1991 break; 1992 } 1993 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 1994 char *type, *idx, *end, *p; 1995 uint64_t id, vdev_id; 1996 1997 /* 1998 * Determine our vdev type, keeping in mind 1999 * that the srchval is composed of a type and 2000 * vdev id pair (i.e. mirror-4). 
2001 */ 2002 if ((type = strdup(srchval)) == NULL) 2003 return (NULL); 2004 2005 if ((p = strrchr(type, '-')) == NULL) { 2006 free(type); 2007 break; 2008 } 2009 idx = p + 1; 2010 *p = '\0'; 2011 2012 /* 2013 * If the types don't match then keep looking. 2014 */ 2015 if (strncmp(val, type, strlen(val)) != 0) { 2016 free(type); 2017 break; 2018 } 2019 2020 verify(strncmp(type, VDEV_TYPE_RAIDZ, 2021 strlen(VDEV_TYPE_RAIDZ)) == 0 || 2022 strncmp(type, VDEV_TYPE_MIRROR, 2023 strlen(VDEV_TYPE_MIRROR)) == 0); 2024 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 2025 &id) == 0); 2026 2027 errno = 0; 2028 vdev_id = strtoull(idx, &end, 10); 2029 2030 free(type); 2031 if (errno != 0) 2032 return (NULL); 2033 2034 /* 2035 * Now verify that we have the correct vdev id. 2036 */ 2037 if (vdev_id == id) 2038 return (nv); 2039 } 2040 2041 /* 2042 * Common case 2043 */ 2044 if (strcmp(srchval, val) == 0) 2045 return (nv); 2046 break; 2047 } 2048 2049 default: 2050 break; 2051 } 2052 2053 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2054 &child, &children) != 0) 2055 return (NULL); 2056 2057 for (c = 0; c < children; c++) { 2058 if ((ret = vdev_to_nvlist_iter(child[c], search, 2059 avail_spare, l2cache, NULL)) != NULL) { 2060 /* 2061 * The 'is_log' value is only set for the toplevel 2062 * vdev, not the leaf vdevs. So we always lookup the 2063 * log device from the root of the vdev tree (where 2064 * 'log' is non-NULL). 2065 */ 2066 if (log != NULL && 2067 nvlist_lookup_uint64(child[c], 2068 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 2069 is_log) { 2070 *log = B_TRUE; 2071 } 2072 return (ret); 2073 } 2074 } 2075 2076 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2077 &child, &children) == 0) { 2078 for (c = 0; c < children; c++) { 2079 if ((ret = vdev_to_nvlist_iter(child[c], search, 2080 avail_spare, l2cache, NULL)) != NULL) { 2081 *avail_spare = B_TRUE; 2082 return (ret); 2083 } 2084 } 2085 } 2086 2087 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2088 &child, &children) == 0) { 2089 for (c = 0; c < children; c++) { 2090 if ((ret = vdev_to_nvlist_iter(child[c], search, 2091 avail_spare, l2cache, NULL)) != NULL) { 2092 *l2cache = B_TRUE; 2093 return (ret); 2094 } 2095 } 2096 } 2097 2098 return (NULL); 2099 } 2100 2101 /* 2102 * Given a physical path (minus the "/devices" prefix), find the 2103 * associated vdev. 2104 */ 2105 nvlist_t * 2106 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 2107 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 2108 { 2109 nvlist_t *search, *nvroot, *ret; 2110 2111 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2112 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 2113 2114 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2115 &nvroot) == 0); 2116 2117 *avail_spare = B_FALSE; 2118 *l2cache = B_FALSE; 2119 if (log != NULL) 2120 *log = B_FALSE; 2121 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2122 nvlist_free(search); 2123 2124 return (ret); 2125 } 2126 2127 /* 2128 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 
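 * Only the type prefix is compared, so names such as "mirror-3" also match.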
2129 */ 2130 boolean_t 2131 zpool_vdev_is_interior(const char *name) 2132 { 2133 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2134 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2135 return (B_TRUE); 2136 return (B_FALSE); 2137 } 2138 2139 nvlist_t * 2140 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2141 boolean_t *l2cache, boolean_t *log) 2142 { 2143 char buf[MAXPATHLEN]; 2144 char *end; 2145 nvlist_t *nvroot, *search, *ret; 2146 uint64_t guid; 2147 2148 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2149 2150 guid = strtoull(path, &end, 10); 2151 if (guid != 0 && *end == '\0') { 2152 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2153 } else if (zpool_vdev_is_interior(path)) { 2154 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2155 } else if (path[0] != '/') { 2156 (void) snprintf(buf, sizeof (buf), "%s/%s", ZFS_DISK_ROOT, 2157 path); 2158 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2159 } else { 2160 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2161 } 2162 2163 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2164 &nvroot) == 0); 2165 2166 *avail_spare = B_FALSE; 2167 *l2cache = B_FALSE; 2168 if (log != NULL) 2169 *log = B_FALSE; 2170 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2171 nvlist_free(search); 2172 2173 return (ret); 2174 } 2175 2176 static int 2177 vdev_online(nvlist_t *nv) 2178 { 2179 uint64_t ival; 2180 2181 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2182 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2183 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2184 return (0); 2185 2186 return (1); 2187 } 2188 2189 /* 2190 * Helper function for zpool_get_physpaths(). 2191 */ 2192 static int 2193 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2194 size_t *bytes_written) 2195 { 2196 size_t bytes_left, pos, rsz; 2197 char *tmppath; 2198 const char *format; 2199 2200 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2201 &tmppath) != 0) 2202 return (EZFS_NODEVICE); 2203 2204 pos = *bytes_written; 2205 bytes_left = physpath_size - pos; 2206 format = (pos == 0) ? "%s" : " %s"; 2207 2208 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2209 *bytes_written += rsz; 2210 2211 if (rsz >= bytes_left) { 2212 /* if physpath was not copied properly, clear it */ 2213 if (bytes_left != 0) { 2214 physpath[pos] = 0; 2215 } 2216 return (EZFS_NOSPC); 2217 } 2218 return (0); 2219 } 2220 2221 static int 2222 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2223 size_t *rsz, boolean_t is_spare) 2224 { 2225 char *type; 2226 int ret; 2227 2228 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2229 return (EZFS_INVALCONFIG); 2230 2231 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2232 /* 2233 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2234 * For a spare vdev, we only want to boot from the active 2235 * spare device. 
2236 */ 2237 if (is_spare) { 2238 uint64_t spare = 0; 2239 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2240 &spare); 2241 if (!spare) 2242 return (EZFS_INVALCONFIG); 2243 } 2244 2245 if (vdev_online(nv)) { 2246 if ((ret = vdev_get_one_physpath(nv, physpath, 2247 phypath_size, rsz)) != 0) 2248 return (ret); 2249 } 2250 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2251 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2252 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2253 nvlist_t **child; 2254 uint_t count; 2255 int i, ret; 2256 2257 if (nvlist_lookup_nvlist_array(nv, 2258 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2259 return (EZFS_INVALCONFIG); 2260 2261 for (i = 0; i < count; i++) { 2262 ret = vdev_get_physpaths(child[i], physpath, 2263 phypath_size, rsz, is_spare); 2264 if (ret == EZFS_NOSPC) 2265 return (ret); 2266 } 2267 } 2268 2269 return (EZFS_POOL_INVALARG); 2270 } 2271 2272 /* 2273 * Get phys_path for a root pool config. 2274 * Return 0 on success; non-zero on failure. 2275 */ 2276 static int 2277 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2278 { 2279 size_t rsz; 2280 nvlist_t *vdev_root; 2281 nvlist_t **child; 2282 uint_t count; 2283 char *type; 2284 2285 rsz = 0; 2286 2287 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2288 &vdev_root) != 0) 2289 return (EZFS_INVALCONFIG); 2290 2291 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2292 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2293 &child, &count) != 0) 2294 return (EZFS_INVALCONFIG); 2295 2296 /* 2297 * A root pool can only have a single top-level vdev. 2298 */ 2299 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1) 2300 return (EZFS_POOL_INVALARG); 2301 2302 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2303 B_FALSE); 2304 2305 /* No online devices */ 2306 if (rsz == 0) 2307 return (EZFS_NODEVICE); 2308 2309 return (0); 2310 } 2311 2312 /* 2313 * Get phys_path for a root pool. 2314 * Return 0 on success; non-zero on failure. 2315 */ 2316 int 2317 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2318 { 2319 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2320 phypath_size)); 2321 } 2322 2323 /* 2324 * If the device has been dynamically expanded then we need to relabel 2325 * the disk to use the new unallocated space. 2326 */ 2327 static int 2328 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2329 { 2330 char path[MAXPATHLEN]; 2331 char errbuf[1024]; 2332 int fd, error; 2333 int (*_efi_use_whole_disk)(int); 2334 2335 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2336 "efi_use_whole_disk")) == NULL) 2337 return (-1); 2338 2339 (void) snprintf(path, sizeof (path), "%s/%s", ZFS_RDISK_ROOT, name); 2340 2341 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2342 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2343 "relabel '%s': unable to open device"), name); 2344 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2345 } 2346 2347 /* 2348 * It's possible that we might encounter an error if the device 2349 * does not have any unallocated space left. If so, we simply 2350 * ignore that error and continue on.
2351 */ 2352 error = _efi_use_whole_disk(fd); 2353 (void) close(fd); 2354 if (error && error != VT_ENOSPC) { 2355 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2356 "relabel '%s': unable to read disk capacity"), name); 2357 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2358 } 2359 return (0); 2360 } 2361 2362 /* 2363 * Bring the specified vdev online. The 'flags' parameter is a set of the 2364 * ZFS_ONLINE_* flags. 2365 */ 2366 int 2367 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2368 vdev_state_t *newstate) 2369 { 2370 zfs_cmd_t zc = { 0 }; 2371 char msg[1024]; 2372 nvlist_t *tgt; 2373 boolean_t avail_spare, l2cache, islog; 2374 libzfs_handle_t *hdl = zhp->zpool_hdl; 2375 2376 if (flags & ZFS_ONLINE_EXPAND) { 2377 (void) snprintf(msg, sizeof (msg), 2378 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2379 } else { 2380 (void) snprintf(msg, sizeof (msg), 2381 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2382 } 2383 2384 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2385 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2386 &islog)) == NULL) 2387 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2388 2389 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2390 2391 if (avail_spare) 2392 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2393 2394 if (flags & ZFS_ONLINE_EXPAND || 2395 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) { 2396 char *pathname = NULL; 2397 uint64_t wholedisk = 0; 2398 2399 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2400 &wholedisk); 2401 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, 2402 &pathname) == 0); 2403 2404 /* 2405 * XXX - L2ARC 1.0 devices can't support expansion. 2406 */ 2407 if (l2cache) { 2408 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2409 "cannot expand cache devices")); 2410 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2411 } 2412 2413 if (wholedisk) { 2414 pathname += strlen(ZFS_DISK_ROOT) + 1; 2415 (void) zpool_relabel_disk(hdl, pathname); 2416 } 2417 } 2418 2419 zc.zc_cookie = VDEV_STATE_ONLINE; 2420 zc.zc_obj = flags; 2421 2422 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2423 if (errno == EINVAL) { 2424 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2425 "from this pool into a new one. Use '%s' " 2426 "instead"), "zpool detach"); 2427 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2428 } 2429 return (zpool_standard_error(hdl, errno, msg)); 2430 } 2431 2432 *newstate = zc.zc_cookie; 2433 return (0); 2434 } 2435 2436 /* 2437 * Take the specified vdev offline 2438 */ 2439 int 2440 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2441 { 2442 zfs_cmd_t zc = { 0 }; 2443 char msg[1024]; 2444 nvlist_t *tgt; 2445 boolean_t avail_spare, l2cache; 2446 libzfs_handle_t *hdl = zhp->zpool_hdl; 2447 2448 (void) snprintf(msg, sizeof (msg), 2449 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2450 2451 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2452 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2453 NULL)) == NULL) 2454 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2455 2456 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2457 2458 if (avail_spare) 2459 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2460 2461 zc.zc_cookie = VDEV_STATE_OFFLINE; 2462 zc.zc_obj = istmp ? 
ZFS_OFFLINE_TEMPORARY : 0; 2463 2464 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2465 return (0); 2466 2467 switch (errno) { 2468 case EBUSY: 2469 2470 /* 2471 * There are no other replicas of this device. 2472 */ 2473 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2474 2475 case EEXIST: 2476 /* 2477 * The log device has unplayed logs 2478 */ 2479 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2480 2481 default: 2482 return (zpool_standard_error(hdl, errno, msg)); 2483 } 2484 } 2485 2486 /* 2487 * Mark the given vdev faulted. 2488 */ 2489 int 2490 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2491 { 2492 zfs_cmd_t zc = { 0 }; 2493 char msg[1024]; 2494 libzfs_handle_t *hdl = zhp->zpool_hdl; 2495 2496 (void) snprintf(msg, sizeof (msg), 2497 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2498 2499 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2500 zc.zc_guid = guid; 2501 zc.zc_cookie = VDEV_STATE_FAULTED; 2502 zc.zc_obj = aux; 2503 2504 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2505 return (0); 2506 2507 switch (errno) { 2508 case EBUSY: 2509 2510 /* 2511 * There are no other replicas of this device. 2512 */ 2513 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2514 2515 default: 2516 return (zpool_standard_error(hdl, errno, msg)); 2517 } 2518 2519 } 2520 2521 /* 2522 * Mark the given vdev degraded. 2523 */ 2524 int 2525 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2526 { 2527 zfs_cmd_t zc = { 0 }; 2528 char msg[1024]; 2529 libzfs_handle_t *hdl = zhp->zpool_hdl; 2530 2531 (void) snprintf(msg, sizeof (msg), 2532 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2533 2534 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2535 zc.zc_guid = guid; 2536 zc.zc_cookie = VDEV_STATE_DEGRADED; 2537 zc.zc_obj = aux; 2538 2539 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2540 return (0); 2541 2542 return (zpool_standard_error(hdl, errno, msg)); 2543 } 2544 2545 /* 2546 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2547 * a hot spare. 2548 */ 2549 static boolean_t 2550 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2551 { 2552 nvlist_t **child; 2553 uint_t c, children; 2554 char *type; 2555 2556 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2557 &children) == 0) { 2558 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2559 &type) == 0); 2560 2561 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2562 children == 2 && child[which] == tgt) 2563 return (B_TRUE); 2564 2565 for (c = 0; c < children; c++) 2566 if (is_replacing_spare(child[c], tgt, which)) 2567 return (B_TRUE); 2568 } 2569 2570 return (B_FALSE); 2571 } 2572 2573 /* 2574 * Attach new_disk (fully described by nvroot) to old_disk. 2575 * If 'replacing' is specified, the new disk will replace the old one. 
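 *
 * A minimal usage sketch (illustrative only; 'zhp' is an open pool handle,
 * the device names are hypothetical, error handling is elided, and in
 * practice 'nvroot' is built by the zpool(1M) command):
 *
 *	nvlist_t *nvroot, *disk;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c1t1d0s0") == 0);
 *	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	(void) zpool_vdev_attach(zhp, "c1t0d0", "c1t1d0", nvroot, 1);
 *
 *	nvlist_free(disk);
 *	nvlist_free(nvroot);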
2576 */ 2577 int 2578 zpool_vdev_attach(zpool_handle_t *zhp, 2579 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2580 { 2581 zfs_cmd_t zc = { 0 }; 2582 char msg[1024]; 2583 int ret; 2584 nvlist_t *tgt; 2585 boolean_t avail_spare, l2cache, islog; 2586 uint64_t val; 2587 char *newname; 2588 nvlist_t **child; 2589 uint_t children; 2590 nvlist_t *config_root; 2591 libzfs_handle_t *hdl = zhp->zpool_hdl; 2592 boolean_t rootpool = zpool_is_bootable(zhp); 2593 2594 if (replacing) 2595 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2596 "cannot replace %s with %s"), old_disk, new_disk); 2597 else 2598 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2599 "cannot attach %s to %s"), new_disk, old_disk); 2600 2601 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2602 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2603 &islog)) == 0) 2604 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2605 2606 if (avail_spare) 2607 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2608 2609 if (l2cache) 2610 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2611 2612 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2613 zc.zc_cookie = replacing; 2614 2615 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2616 &child, &children) != 0 || children != 1) { 2617 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2618 "new device must be a single disk")); 2619 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2620 } 2621 2622 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2623 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2624 2625 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2626 return (-1); 2627 2628 /* 2629 * If the target is a hot spare that has been swapped in, we can only 2630 * replace it with another hot spare. 2631 */ 2632 if (replacing && 2633 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2634 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2635 NULL) == NULL || !avail_spare) && 2636 is_replacing_spare(config_root, tgt, 1)) { 2637 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2638 "can only be replaced by another hot spare")); 2639 free(newname); 2640 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2641 } 2642 2643 free(newname); 2644 2645 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2646 return (-1); 2647 2648 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2649 2650 zcmd_free_nvlists(&zc); 2651 2652 if (ret == 0) { 2653 if (rootpool) { 2654 /* 2655 * XXX need a better way to prevent user from 2656 * booting up a half-baked vdev. 2657 */ 2658 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2659 "sure to wait until resilver is done " 2660 "before rebooting.\n")); 2661 } 2662 return (0); 2663 } 2664 2665 switch (errno) { 2666 case ENOTSUP: 2667 /* 2668 * Can't attach to or replace this type of vdev. 
2669 */ 2670 if (replacing) { 2671 uint64_t version = zpool_get_prop_int(zhp, 2672 ZPOOL_PROP_VERSION, NULL); 2673 2674 if (islog) 2675 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2676 "cannot replace a log with a spare")); 2677 else if (version >= SPA_VERSION_MULTI_REPLACE) 2678 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2679 "already in replacing/spare config; wait " 2680 "for completion or use 'zpool detach'")); 2681 else 2682 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2683 "cannot replace a replacing device")); 2684 } else { 2685 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2686 "can only attach to mirrors and top-level " 2687 "disks")); 2688 } 2689 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2690 break; 2691 2692 case EINVAL: 2693 /* 2694 * The new device must be a single disk. 2695 */ 2696 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2697 "new device must be a single disk")); 2698 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2699 break; 2700 2701 case EBUSY: 2702 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2703 new_disk); 2704 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2705 break; 2706 2707 case EOVERFLOW: 2708 /* 2709 * The new device is too small. 2710 */ 2711 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2712 "device is too small")); 2713 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2714 break; 2715 2716 case EDOM: 2717 /* 2718 * The new device has a different alignment requirement. 2719 */ 2720 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2721 "devices have different sector alignment")); 2722 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2723 break; 2724 2725 case ENAMETOOLONG: 2726 /* 2727 * The resulting top-level vdev spec won't fit in the label. 2728 */ 2729 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2730 break; 2731 2732 default: 2733 (void) zpool_standard_error(hdl, errno, msg); 2734 } 2735 2736 return (-1); 2737 } 2738 2739 /* 2740 * Detach the specified device. 2741 */ 2742 int 2743 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2744 { 2745 zfs_cmd_t zc = { 0 }; 2746 char msg[1024]; 2747 nvlist_t *tgt; 2748 boolean_t avail_spare, l2cache; 2749 libzfs_handle_t *hdl = zhp->zpool_hdl; 2750 2751 (void) snprintf(msg, sizeof (msg), 2752 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2753 2754 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2755 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2756 NULL)) == 0) 2757 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2758 2759 if (avail_spare) 2760 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2761 2762 if (l2cache) 2763 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2764 2765 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2766 2767 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2768 return (0); 2769 2770 switch (errno) { 2771 2772 case ENOTSUP: 2773 /* 2774 * Can't detach from this type of vdev. 2775 */ 2776 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2777 "applicable to mirror and replacing vdevs")); 2778 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2779 break; 2780 2781 case EBUSY: 2782 /* 2783 * There are no other replicas of this device. 2784 */ 2785 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2786 break; 2787 2788 default: 2789 (void) zpool_standard_error(hdl, errno, msg); 2790 } 2791 2792 return (-1); 2793 } 2794 2795 /* 2796 * Find a mirror vdev in the source nvlist. 2797 * 2798 * The mchild array contains a list of disks in one of the top-level mirrors 2799 * of the source pool. The schild array contains a list of disks that the 2800 * user specified on the command line. 
We loop over the mchild array to 2801 * see if any entry in the schild array matches. 2802 * 2803 * If a disk in the mchild array is found in the schild array, we return 2804 * the index of that entry. Otherwise we return -1. 2805 */ 2806 static int 2807 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2808 nvlist_t **schild, uint_t schildren) 2809 { 2810 uint_t mc; 2811 2812 for (mc = 0; mc < mchildren; mc++) { 2813 uint_t sc; 2814 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2815 mchild[mc], B_FALSE); 2816 2817 for (sc = 0; sc < schildren; sc++) { 2818 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2819 schild[sc], B_FALSE); 2820 boolean_t result = (strcmp(mpath, spath) == 0); 2821 2822 free(spath); 2823 if (result) { 2824 free(mpath); 2825 return (mc); 2826 } 2827 } 2828 2829 free(mpath); 2830 } 2831 2832 return (-1); 2833 } 2834 2835 /* 2836 * Split a mirror pool. If newroot points to null, then a new nvlist 2837 * is generated and it is the responsibility of the caller to free it. 2838 */ 2839 int 2840 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2841 nvlist_t *props, splitflags_t flags) 2842 { 2843 zfs_cmd_t zc = { 0 }; 2844 char msg[1024]; 2845 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2846 nvlist_t **varray = NULL, *zc_props = NULL; 2847 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2848 libzfs_handle_t *hdl = zhp->zpool_hdl; 2849 uint64_t vers; 2850 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2851 int retval = 0; 2852 2853 (void) snprintf(msg, sizeof (msg), 2854 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2855 2856 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2857 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2858 2859 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2860 (void) fprintf(stderr, gettext("Internal error: unable to " 2861 "retrieve pool configuration\n")); 2862 return (-1); 2863 } 2864 2865 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2866 == 0); 2867 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2868 2869 if (props) { 2870 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2871 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2872 props, vers, flags, msg)) == NULL) 2873 return (-1); 2874 } 2875 2876 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2877 &children) != 0) { 2878 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2879 "Source pool is missing vdev tree")); 2880 nvlist_free(zc_props); 2881 return (-1); 2882 } 2883 2884 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2885 vcount = 0; 2886 2887 if (*newroot == NULL || 2888 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2889 &newchild, &newchildren) != 0) 2890 newchildren = 0; 2891 2892 for (c = 0; c < children; c++) { 2893 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2894 char *type; 2895 nvlist_t **mchild, *vdev; 2896 uint_t mchildren; 2897 int entry; 2898 2899 /* 2900 * Unlike cache & spares, slogs are stored in the 2901 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2902 */ 2903 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2904 &is_log); 2905 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2906 &is_hole); 2907 if (is_log || is_hole) { 2908 /* 2909 * Create a hole vdev and put it in the config. 
2910 */ 2911 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2912 goto out; 2913 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2914 VDEV_TYPE_HOLE) != 0) 2915 goto out; 2916 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2917 1) != 0) 2918 goto out; 2919 if (lastlog == 0) 2920 lastlog = vcount; 2921 varray[vcount++] = vdev; 2922 continue; 2923 } 2924 lastlog = 0; 2925 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2926 == 0); 2927 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2928 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2929 "Source pool must be composed only of mirrors\n")); 2930 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2931 goto out; 2932 } 2933 2934 verify(nvlist_lookup_nvlist_array(child[c], 2935 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2936 2937 /* find or add an entry for this top-level vdev */ 2938 if (newchildren > 0 && 2939 (entry = find_vdev_entry(zhp, mchild, mchildren, 2940 newchild, newchildren)) >= 0) { 2941 /* We found a disk that the user specified. */ 2942 vdev = mchild[entry]; 2943 ++found; 2944 } else { 2945 /* User didn't specify a disk for this vdev. */ 2946 vdev = mchild[mchildren - 1]; 2947 } 2948 2949 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 2950 goto out; 2951 } 2952 2953 /* did we find every disk the user specified? */ 2954 if (found != newchildren) { 2955 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 2956 "include at most one disk from each mirror")); 2957 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2958 goto out; 2959 } 2960 2961 /* Prepare the nvlist for populating. */ 2962 if (*newroot == NULL) { 2963 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 2964 goto out; 2965 freelist = B_TRUE; 2966 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 2967 VDEV_TYPE_ROOT) != 0) 2968 goto out; 2969 } else { 2970 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 2971 } 2972 2973 /* Add all the children we found */ 2974 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 2975 lastlog == 0 ? vcount : lastlog) != 0) 2976 goto out; 2977 2978 /* 2979 * If we're just doing a dry run, exit now with success. 2980 */ 2981 if (flags.dryrun) { 2982 memory_err = B_FALSE; 2983 freelist = B_FALSE; 2984 goto out; 2985 } 2986 2987 /* now build up the config list & call the ioctl */ 2988 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 2989 goto out; 2990 2991 if (nvlist_add_nvlist(newconfig, 2992 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 2993 nvlist_add_string(newconfig, 2994 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 2995 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 2996 goto out; 2997 2998 /* 2999 * The new pool is automatically part of the namespace unless we 3000 * explicitly export it. 
3001 */ 3002 if (!flags.import) 3003 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3004 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3005 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3006 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3007 goto out; 3008 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3009 goto out; 3010 3011 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3012 retval = zpool_standard_error(hdl, errno, msg); 3013 goto out; 3014 } 3015 3016 freelist = B_FALSE; 3017 memory_err = B_FALSE; 3018 3019 out: 3020 if (varray != NULL) { 3021 int v; 3022 3023 for (v = 0; v < vcount; v++) 3024 nvlist_free(varray[v]); 3025 free(varray); 3026 } 3027 zcmd_free_nvlists(&zc); 3028 nvlist_free(zc_props); 3029 nvlist_free(newconfig); 3030 if (freelist) { 3031 nvlist_free(*newroot); 3032 *newroot = NULL; 3033 } 3034 3035 if (retval != 0) 3036 return (retval); 3037 3038 if (memory_err) 3039 return (no_memory(hdl)); 3040 3041 return (0); 3042 } 3043 3044 /* 3045 * Remove the given device. Currently, this is supported only for hot spares, 3046 * level 2 cache devices, and log devices. 3047 */ 3048 int 3049 zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3050 { 3051 zfs_cmd_t zc = { 0 }; 3052 char msg[1024]; 3053 nvlist_t *tgt; 3054 boolean_t avail_spare, l2cache, islog; 3055 libzfs_handle_t *hdl = zhp->zpool_hdl; 3056 uint64_t version; 3057 3058 (void) snprintf(msg, sizeof (msg), 3059 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3060 3061 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3062 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3063 &islog)) == 0) 3064 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3065 /* 3066 * XXX - this should just go away. 3067 */ 3068 if (!avail_spare && !l2cache && !islog) { 3069 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3070 "only inactive hot spares, cache, top-level, " 3071 "or log devices can be removed")); 3072 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3073 } 3074 3075 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3076 if (islog && version < SPA_VERSION_HOLES) { 3077 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3078 "pool must be upgraded to support log removal")); 3079 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3080 } 3081 3082 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3083 3084 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3085 return (0); 3086 3087 return (zpool_standard_error(hdl, errno, msg)); 3088 } 3089 3090 /* 3091 * Clear the errors for the pool, or the particular device if specified. 3092 */ 3093 int 3094 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3095 { 3096 zfs_cmd_t zc = { 0 }; 3097 char msg[1024]; 3098 nvlist_t *tgt; 3099 zpool_rewind_policy_t policy; 3100 boolean_t avail_spare, l2cache; 3101 libzfs_handle_t *hdl = zhp->zpool_hdl; 3102 nvlist_t *nvi = NULL; 3103 int error; 3104 3105 if (path) 3106 (void) snprintf(msg, sizeof (msg), 3107 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3108 path); 3109 else 3110 (void) snprintf(msg, sizeof (msg), 3111 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3112 zhp->zpool_name); 3113 3114 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3115 if (path) { 3116 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3117 &l2cache, NULL)) == 0) 3118 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3119 3120 /* 3121 * Don't allow error clearing for hot spares. Do allow 3122 * error clearing for l2cache devices.
3123 */ 3124 if (avail_spare) 3125 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3126 3127 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3128 &zc.zc_guid) == 0); 3129 } 3130 3131 zpool_get_rewind_policy(rewindnvl, &policy); 3132 zc.zc_cookie = policy.zrp_request; 3133 3134 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3135 return (-1); 3136 3137 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 3138 return (-1); 3139 3140 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3141 errno == ENOMEM) { 3142 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3143 zcmd_free_nvlists(&zc); 3144 return (-1); 3145 } 3146 } 3147 3148 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) && 3149 errno != EPERM && errno != EACCES)) { 3150 if (policy.zrp_request & 3151 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3152 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3153 zpool_rewind_exclaim(hdl, zc.zc_name, 3154 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), 3155 nvi); 3156 nvlist_free(nvi); 3157 } 3158 zcmd_free_nvlists(&zc); 3159 return (0); 3160 } 3161 3162 zcmd_free_nvlists(&zc); 3163 return (zpool_standard_error(hdl, errno, msg)); 3164 } 3165 3166 /* 3167 * Similar to zpool_clear(), but takes a GUID (used by fmd). 3168 */ 3169 int 3170 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3171 { 3172 zfs_cmd_t zc = { 0 }; 3173 char msg[1024]; 3174 libzfs_handle_t *hdl = zhp->zpool_hdl; 3175 3176 (void) snprintf(msg, sizeof (msg), 3177 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3178 guid); 3179 3180 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3181 zc.zc_guid = guid; 3182 zc.zc_cookie = ZPOOL_NO_REWIND; 3183 3184 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3185 return (0); 3186 3187 return (zpool_standard_error(hdl, errno, msg)); 3188 } 3189 3190 /* 3191 * Change the GUID for a pool. 3192 */ 3193 int 3194 zpool_reguid(zpool_handle_t *zhp) 3195 { 3196 char msg[1024]; 3197 libzfs_handle_t *hdl = zhp->zpool_hdl; 3198 zfs_cmd_t zc = { 0 }; 3199 3200 (void) snprintf(msg, sizeof (msg), 3201 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3202 3203 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3204 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3205 return (0); 3206 3207 return (zpool_standard_error(hdl, errno, msg)); 3208 } 3209 3210 /* 3211 * Reopen the pool. 3212 */ 3213 int 3214 zpool_reopen(zpool_handle_t *zhp) 3215 { 3216 zfs_cmd_t zc = { 0 }; 3217 char msg[1024]; 3218 libzfs_handle_t *hdl = zhp->zpool_hdl; 3219 3220 (void) snprintf(msg, sizeof (msg), 3221 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3222 zhp->zpool_name); 3223 3224 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3225 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3226 return (0); 3227 return (zpool_standard_error(hdl, errno, msg)); 3228 } 3229 3230 /* 3231 * Convert from a devid string to a path. 3232 */ 3233 static char * 3234 devid_to_path(char *devid_str) 3235 { 3236 ddi_devid_t devid; 3237 char *minor; 3238 char *path; 3239 devid_nmlist_t *list = NULL; 3240 int ret; 3241 3242 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3243 return (NULL); 3244 3245 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3246 3247 devid_str_free(minor); 3248 devid_free(devid); 3249 3250 if (ret != 0) 3251 return (NULL); 3252 3253 /* 3254 * In a case the strdup() fails, we will just return NULL below. 
3255 */ 3256 path = strdup(list[0].devname); 3257 3258 devid_free_nmlist(list); 3259 3260 return (path); 3261 } 3262 3263 /* 3264 * Convert from a path to a devid string. 3265 */ 3266 static char * 3267 path_to_devid(const char *path) 3268 { 3269 int fd; 3270 ddi_devid_t devid; 3271 char *minor, *ret; 3272 3273 if ((fd = open(path, O_RDONLY)) < 0) 3274 return (NULL); 3275 3276 minor = NULL; 3277 ret = NULL; 3278 if (devid_get(fd, &devid) == 0) { 3279 if (devid_get_minor_name(fd, &minor) == 0) 3280 ret = devid_str_encode(devid, minor); 3281 if (minor != NULL) 3282 devid_str_free(minor); 3283 devid_free(devid); 3284 } 3285 (void) close(fd); 3286 3287 return (ret); 3288 } 3289 3290 /* 3291 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3292 * ignore any failure here, since a common case is for an unprivileged user to 3293 * type 'zpool status', and we'll display the correct information anyway. 3294 */ 3295 static void 3296 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3297 { 3298 zfs_cmd_t zc = { 0 }; 3299 3300 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3301 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3302 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3303 &zc.zc_guid) == 0); 3304 3305 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3306 } 3307 3308 /* 3309 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3310 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3311 * We also check if this is a whole disk, in which case we strip off the 3312 * trailing 's0' slice name. 3313 * 3314 * This routine is also responsible for identifying when disks have been 3315 * reconfigured in a new location. The kernel will have opened the device by 3316 * devid, but the path will still refer to the old location. To catch this, we 3317 * first do a path -> devid translation (which is fast for the common case). If 3318 * the devid matches, we're done. If not, we do a reverse devid -> path 3319 * translation and issue the appropriate ioctl() to update the path of the vdev. 3320 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3321 * of these checks. 3322 */ 3323 char * 3324 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3325 boolean_t verbose) 3326 { 3327 char *path, *devid; 3328 uint64_t value; 3329 char buf[64]; 3330 vdev_stat_t *vs; 3331 uint_t vsc; 3332 3333 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 3334 &value) == 0) { 3335 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3336 &value) == 0); 3337 (void) snprintf(buf, sizeof (buf), "%llu", 3338 (u_longlong_t)value); 3339 path = buf; 3340 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 3341 3342 /* 3343 * If the device is dead (faulted, offline, etc) then don't 3344 * bother opening it. Otherwise we may be forcing the user to 3345 * open a misbehaving device, which can have undesirable 3346 * effects. 3347 */ 3348 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3349 (uint64_t **)&vs, &vsc) != 0 || 3350 vs->vs_state >= VDEV_STATE_DEGRADED) && 3351 zhp != NULL && 3352 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3353 /* 3354 * Determine if the current path is correct. 
3355 */ 3356 char *newdevid = path_to_devid(path); 3357 3358 if (newdevid == NULL || 3359 strcmp(devid, newdevid) != 0) { 3360 char *newpath; 3361 3362 if ((newpath = devid_to_path(devid)) != NULL) { 3363 /* 3364 * Update the path appropriately. 3365 */ 3366 set_path(zhp, nv, newpath); 3367 if (nvlist_add_string(nv, 3368 ZPOOL_CONFIG_PATH, newpath) == 0) 3369 verify(nvlist_lookup_string(nv, 3370 ZPOOL_CONFIG_PATH, 3371 &path) == 0); 3372 free(newpath); 3373 } 3374 } 3375 3376 if (newdevid) 3377 devid_str_free(newdevid); 3378 } 3379 3380 if (strncmp(path, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0) 3381 path += strlen(ZFS_DISK_ROOTD); 3382 3383 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3384 &value) == 0 && value) { 3385 int pathlen = strlen(path); 3386 char *tmp = zfs_strdup(hdl, path); 3387 3388 /* 3389 * If it starts with c#, and ends with "s0", chop 3390 * the "s0" off, or if it ends with "s0/old", remove 3391 * the "s0" from the middle. 3392 */ 3393 if (CTD_CHECK(tmp)) { 3394 if (strcmp(&tmp[pathlen - 2], "s0") == 0) { 3395 tmp[pathlen - 2] = '\0'; 3396 } else if (pathlen > 6 && 3397 strcmp(&tmp[pathlen - 6], "s0/old") == 0) { 3398 (void) strcpy(&tmp[pathlen - 6], 3399 "/old"); 3400 } 3401 } 3402 return (tmp); 3403 } 3404 } else { 3405 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3406 3407 /* 3408 * If it's a raidz device, we need to stick in the parity level. 3409 */ 3410 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3411 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3412 &value) == 0); 3413 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3414 (u_longlong_t)value); 3415 path = buf; 3416 } 3417 3418 /* 3419 * We identify each top-level vdev by using a <type-id> 3420 * naming convention. 3421 */ 3422 if (verbose) { 3423 uint64_t id; 3424 3425 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3426 &id) == 0); 3427 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3428 (u_longlong_t)id); 3429 path = buf; 3430 } 3431 } 3432 3433 return (zfs_strdup(hdl, path)); 3434 } 3435 3436 static int 3437 zbookmark_mem_compare(const void *a, const void *b) 3438 { 3439 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 3440 } 3441 3442 /* 3443 * Retrieve the persistent error log, uniquify the members, and return to the 3444 * caller. 3445 */ 3446 int 3447 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3448 { 3449 zfs_cmd_t zc = { 0 }; 3450 uint64_t count; 3451 zbookmark_phys_t *zb = NULL; 3452 int i; 3453 3454 /* 3455 * Retrieve the raw error list from the kernel. If the number of errors 3456 * has increased, allocate more space and continue until we get the 3457 * entire list. 3458 */ 3459 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3460 &count) == 0); 3461 if (count == 0) 3462 return (0); 3463 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3464 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL) 3465 return (-1); 3466 zc.zc_nvlist_dst_size = count; 3467 (void) strcpy(zc.zc_name, zhp->zpool_name); 3468 for (;;) { 3469 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3470 &zc) != 0) { 3471 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3472 if (errno == ENOMEM) { 3473 void *dst; 3474 3475 count = zc.zc_nvlist_dst_size; 3476 dst = zfs_alloc(zhp->zpool_hdl, count * 3477 sizeof (zbookmark_phys_t)); 3478 if (dst == NULL) 3479 return (-1); 3480 zc.zc_nvlist_dst = (uintptr_t)dst; 3481 } else { 3482 return (-1); 3483 } 3484 } else { 3485 break; 3486 } 3487 } 3488 3489 /* 3490 * Sort the resulting bookmarks. 
This is a little confusing due to the 3491 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last 3492 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks 3493 * _not_ copied as part of the process. So we point the start of our 3494 * array appropriately and decrement the total number of elements. 3495 */ 3496 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) + 3497 zc.zc_nvlist_dst_size; 3498 count -= zc.zc_nvlist_dst_size; 3499 3500 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare); 3501 3502 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 3503 3504 /* 3505 * Fill in the nverrlistp with nvlists of dataset and object numbers. 3506 */ 3507 for (i = 0; i < count; i++) { 3508 nvlist_t *nv; 3509 3510 /* ignoring zb_blkid and zb_level for now */ 3511 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3512 zb[i-1].zb_object == zb[i].zb_object) 3513 continue; 3514 3515 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3516 goto nomem; 3517 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3518 zb[i].zb_objset) != 0) { 3519 nvlist_free(nv); 3520 goto nomem; 3521 } 3522 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3523 zb[i].zb_object) != 0) { 3524 nvlist_free(nv); 3525 goto nomem; 3526 } 3527 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3528 nvlist_free(nv); 3529 goto nomem; 3530 } 3531 nvlist_free(nv); 3532 } 3533 3534 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3535 return (0); 3536 3537 nomem: 3538 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3539 return (no_memory(zhp->zpool_hdl)); 3540 } 3541 3542 /* 3543 * Upgrade a ZFS pool to the latest on-disk version. 3544 */ 3545 int 3546 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3547 { 3548 zfs_cmd_t zc = { 0 }; 3549 libzfs_handle_t *hdl = zhp->zpool_hdl; 3550 3551 (void) strcpy(zc.zc_name, zhp->zpool_name); 3552 zc.zc_cookie = new_version; 3553 3554 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3555 return (zpool_standard_error_fmt(hdl, errno, 3556 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3557 zhp->zpool_name)); 3558 return (0); 3559 } 3560 3561 void 3562 zfs_save_arguments(int argc, char **argv, char *string, int len) 3563 { 3564 (void) strlcpy(string, basename(argv[0]), len); 3565 for (int i = 1; i < argc; i++) { 3566 (void) strlcat(string, " ", len); 3567 (void) strlcat(string, argv[i], len); 3568 } 3569 } 3570 3571 int 3572 zpool_log_history(libzfs_handle_t *hdl, const char *message) 3573 { 3574 zfs_cmd_t zc = { 0 }; 3575 nvlist_t *args; 3576 int err; 3577 3578 args = fnvlist_alloc(); 3579 fnvlist_add_string(args, "message", message); 3580 err = zcmd_write_src_nvlist(hdl, &zc, args); 3581 if (err == 0) 3582 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc); 3583 nvlist_free(args); 3584 zcmd_free_nvlists(&zc); 3585 return (err); 3586 } 3587 3588 /* 3589 * Perform ioctl to get some command history of a pool. 3590 * 3591 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 3592 * logical offset of the history buffer to start reading from. 3593 * 3594 * Upon return, 'off' is the next logical offset to read from and 3595 * 'len' is the actual number of bytes read into 'buf'.
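 *
 * Callers are expected to loop until no more bytes are returned.  A minimal
 * sketch (illustrative only; 'buf'/'buflen' are the caller's buffer and its
 * size, consume() stands for whatever the caller does with the packed
 * records, and error handling is elided):
 *
 *	uint64_t off = 0, len;
 *
 *	do {
 *		len = buflen;
 *		if (get_history(zhp, buf, &off, &len) != 0)
 *			break;
 *		consume(buf, len);
 *	} while (len != 0);
 *
 * zpool_get_history() below implements this loop, and additionally grows
 * the buffer when a single record does not fit.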
3596 */ 3597 static int 3598 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3599 { 3600 zfs_cmd_t zc = { 0 }; 3601 libzfs_handle_t *hdl = zhp->zpool_hdl; 3602 3603 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3604 3605 zc.zc_history = (uint64_t)(uintptr_t)buf; 3606 zc.zc_history_len = *len; 3607 zc.zc_history_offset = *off; 3608 3609 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3610 switch (errno) { 3611 case EPERM: 3612 return (zfs_error_fmt(hdl, EZFS_PERM, 3613 dgettext(TEXT_DOMAIN, 3614 "cannot show history for pool '%s'"), 3615 zhp->zpool_name)); 3616 case ENOENT: 3617 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3618 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3619 "'%s'"), zhp->zpool_name)); 3620 case ENOTSUP: 3621 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3622 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3623 "'%s', pool must be upgraded"), zhp->zpool_name)); 3624 default: 3625 return (zpool_standard_error_fmt(hdl, errno, 3626 dgettext(TEXT_DOMAIN, 3627 "cannot get history for '%s'"), zhp->zpool_name)); 3628 } 3629 } 3630 3631 *len = zc.zc_history_len; 3632 *off = zc.zc_history_offset; 3633 3634 return (0); 3635 } 3636 3637 /* 3638 * Process the buffer of nvlists, unpacking and storing each nvlist record 3639 * into 'records'. 'leftover' is set to the number of bytes that weren't 3640 * processed as there wasn't a complete record. 3641 */ 3642 int 3643 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3644 nvlist_t ***records, uint_t *numrecords) 3645 { 3646 uint64_t reclen; 3647 nvlist_t *nv; 3648 int i; 3649 3650 while (bytes_read > sizeof (reclen)) { 3651 3652 /* get length of packed record (stored as little endian) */ 3653 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3654 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3655 3656 if (bytes_read < sizeof (reclen) + reclen) 3657 break; 3658 3659 /* unpack record */ 3660 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3661 return (ENOMEM); 3662 bytes_read -= sizeof (reclen) + reclen; 3663 buf += sizeof (reclen) + reclen; 3664 3665 /* add record to nvlist array */ 3666 (*numrecords)++; 3667 if (ISP2(*numrecords + 1)) { 3668 *records = realloc(*records, 3669 *numrecords * 2 * sizeof (nvlist_t *)); 3670 } 3671 (*records)[*numrecords - 1] = nv; 3672 } 3673 3674 *leftover = bytes_read; 3675 return (0); 3676 } 3677 3678 /* 3679 * Retrieve the command history of a pool. 3680 */ 3681 int 3682 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3683 { 3684 char *buf; 3685 int buflen = 128 * 1024; 3686 uint64_t off = 0; 3687 nvlist_t **records = NULL; 3688 uint_t numrecords = 0; 3689 int err, i; 3690 3691 buf = malloc(buflen); 3692 if (buf == NULL) 3693 return (ENOMEM); 3694 do { 3695 uint64_t bytes_read = buflen; 3696 uint64_t leftover; 3697 3698 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3699 break; 3700 3701 /* if nothing else was read in, we're at EOF, just return */ 3702 if (!bytes_read) 3703 break; 3704 3705 if ((err = zpool_history_unpack(buf, bytes_read, 3706 &leftover, &records, &numrecords)) != 0) 3707 break; 3708 off -= leftover; 3709 if (leftover == bytes_read) { 3710 /* 3711 * no progress made, because buffer is not big enough 3712 * to hold this record; resize and retry. 
3713 */ 3714 buflen *= 2; 3715 free(buf); 3716 buf = malloc(buflen); 3717 if (buf == NULL) 3718 return (ENOMEM); 3719 } 3720 3721 /* CONSTCOND */ 3722 } while (1); 3723 3724 free(buf); 3725 3726 if (!err) { 3727 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3728 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3729 records, numrecords) == 0); 3730 } 3731 for (i = 0; i < numrecords; i++) 3732 nvlist_free(records[i]); 3733 free(records); 3734 3735 return (err); 3736 } 3737 3738 void 3739 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3740 char *pathname, size_t len) 3741 { 3742 zfs_cmd_t zc = { 0 }; 3743 boolean_t mounted = B_FALSE; 3744 char *mntpnt = NULL; 3745 char dsname[MAXNAMELEN]; 3746 3747 if (dsobj == 0) { 3748 /* special case for the MOS */ 3749 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3750 return; 3751 } 3752 3753 /* get the dataset's name */ 3754 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3755 zc.zc_obj = dsobj; 3756 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3757 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3758 /* just write out a path of two object numbers */ 3759 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3760 dsobj, obj); 3761 return; 3762 } 3763 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3764 3765 /* find out if the dataset is mounted */ 3766 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3767 3768 /* get the corrupted object's path */ 3769 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3770 zc.zc_obj = obj; 3771 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3772 &zc) == 0) { 3773 if (mounted) { 3774 (void) snprintf(pathname, len, "%s%s", mntpnt, 3775 zc.zc_value); 3776 } else { 3777 (void) snprintf(pathname, len, "%s:%s", 3778 dsname, zc.zc_value); 3779 } 3780 } else { 3781 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3782 } 3783 free(mntpnt); 3784 } 3785 3786 /* 3787 * Read the EFI label from the config, if a label does not exist then 3788 * pass back the error to the caller. If the caller has passed a non-NULL 3789 * diskaddr argument then we set it to the starting address of the EFI 3790 * partition. 
3791 */ 3792 static int 3793 read_efi_label(nvlist_t *config, diskaddr_t *sb) 3794 { 3795 char *path; 3796 int fd; 3797 char diskname[MAXPATHLEN]; 3798 int err = -1; 3799 3800 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3801 return (err); 3802 3803 (void) snprintf(diskname, sizeof (diskname), "%s%s", ZFS_RDISK_ROOT, 3804 strrchr(path, '/')); 3805 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3806 struct dk_gpt *vtoc; 3807 3808 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3809 if (sb != NULL) 3810 *sb = vtoc->efi_parts[0].p_start; 3811 efi_free(vtoc); 3812 } 3813 (void) close(fd); 3814 } 3815 return (err); 3816 } 3817 3818 /* 3819 * determine where a partition starts on a disk in the current 3820 * configuration 3821 */ 3822 static diskaddr_t 3823 find_start_block(nvlist_t *config) 3824 { 3825 nvlist_t **child; 3826 uint_t c, children; 3827 diskaddr_t sb = MAXOFFSET_T; 3828 uint64_t wholedisk; 3829 3830 if (nvlist_lookup_nvlist_array(config, 3831 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3832 if (nvlist_lookup_uint64(config, 3833 ZPOOL_CONFIG_WHOLE_DISK, 3834 &wholedisk) != 0 || !wholedisk) { 3835 return (MAXOFFSET_T); 3836 } 3837 if (read_efi_label(config, &sb) < 0) 3838 sb = MAXOFFSET_T; 3839 return (sb); 3840 } 3841 3842 for (c = 0; c < children; c++) { 3843 sb = find_start_block(child[c]); 3844 if (sb != MAXOFFSET_T) { 3845 return (sb); 3846 } 3847 } 3848 return (MAXOFFSET_T); 3849 } 3850 3851 /* 3852 * Label an individual disk. The name provided is the short name, 3853 * stripped of any leading /dev path. 3854 */ 3855 int 3856 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name) 3857 { 3858 char path[MAXPATHLEN]; 3859 struct dk_gpt *vtoc; 3860 int fd; 3861 size_t resv = EFI_MIN_RESV_SIZE; 3862 uint64_t slice_size; 3863 diskaddr_t start_block; 3864 char errbuf[1024]; 3865 3866 /* prepare an error message just in case */ 3867 (void) snprintf(errbuf, sizeof (errbuf), 3868 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3869 3870 if (zhp) { 3871 nvlist_t *nvroot; 3872 3873 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3874 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3875 3876 if (zhp->zpool_start_block == 0) 3877 start_block = find_start_block(nvroot); 3878 else 3879 start_block = zhp->zpool_start_block; 3880 zhp->zpool_start_block = start_block; 3881 } else { 3882 /* new pool */ 3883 start_block = NEW_START_BLOCK; 3884 } 3885 3886 (void) snprintf(path, sizeof (path), "%s/%s%s", ZFS_RDISK_ROOT, name, 3887 BACKUP_SLICE); 3888 3889 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3890 /* 3891 * This shouldn't happen. We've long since verified that this 3892 * is a valid device. 
3893 */ 3894 zfs_error_aux(hdl, 3895 dgettext(TEXT_DOMAIN, "unable to open device")); 3896 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 3897 } 3898 3899 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 3900 /* 3901 * The only way this can fail is if we run out of memory, or we 3902 * were unable to read the disk's capacity 3903 */ 3904 if (errno == ENOMEM) 3905 (void) no_memory(hdl); 3906 3907 (void) close(fd); 3908 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3909 "unable to read disk capacity"), name); 3910 3911 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 3912 } 3913 3914 slice_size = vtoc->efi_last_u_lba + 1; 3915 slice_size -= EFI_MIN_RESV_SIZE; 3916 if (start_block == MAXOFFSET_T) 3917 start_block = NEW_START_BLOCK; 3918 slice_size -= start_block; 3919 3920 vtoc->efi_parts[0].p_start = start_block; 3921 vtoc->efi_parts[0].p_size = slice_size; 3922 3923 /* 3924 * Why we use V_USR: V_BACKUP confuses users, and is considered 3925 * disposable by some EFI utilities (since EFI doesn't have a backup 3926 * slice). V_UNASSIGNED is supposed to be used only for zero size 3927 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 3928 * etc. were all pretty specific. V_USR is as close to reality as we 3929 * can get, in the absence of V_OTHER. 3930 */ 3931 vtoc->efi_parts[0].p_tag = V_USR; 3932 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 3933 3934 vtoc->efi_parts[8].p_start = slice_size + start_block; 3935 vtoc->efi_parts[8].p_size = resv; 3936 vtoc->efi_parts[8].p_tag = V_RESERVED; 3937 3938 if (efi_write(fd, vtoc) != 0) { 3939 /* 3940 * Some block drivers (like pcata) may not support EFI 3941 * GPT labels. Print out a helpful error message dir- 3942 * ecting the user to manually label the disk and give 3943 * a specific slice. 3944 */ 3945 (void) close(fd); 3946 efi_free(vtoc); 3947 3948 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3949 "try using fdisk(1M) and then provide a specific slice")); 3950 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 3951 } 3952 3953 (void) close(fd); 3954 efi_free(vtoc); 3955 return (0); 3956 } 3957 3958 static boolean_t 3959 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 3960 { 3961 char *type; 3962 nvlist_t **child; 3963 uint_t children, c; 3964 3965 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 3966 if (strcmp(type, VDEV_TYPE_FILE) == 0 || 3967 strcmp(type, VDEV_TYPE_HOLE) == 0 || 3968 strcmp(type, VDEV_TYPE_MISSING) == 0) { 3969 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3970 "vdev type '%s' is not supported"), type); 3971 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 3972 return (B_FALSE); 3973 } 3974 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 3975 &child, &children) == 0) { 3976 for (c = 0; c < children; c++) { 3977 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 3978 return (B_FALSE); 3979 } 3980 } 3981 return (B_TRUE); 3982 } 3983 3984 /* 3985 * Check if this zvol is allowable for use as a dump device; zero if 3986 * it is, > 0 if it isn't, < 0 if it isn't a zvol. 3987 * 3988 * Allowable storage configurations include mirrors, all raidz variants, and 3989 * pools with log, cache, and spare devices. Pools which are backed by files or 3990 * have missing/hole vdevs are not suitable. 
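 *
 * For example (illustrative only; the zvol path is hypothetical):
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/rpool/dump") == 0)
 *		(void) printf("suitable for use as a dump device\n");
 *
 * The argument must begin with ZVOL_FULL_DEV_DIR, and the check is applied
 * to the first top-level vdev of the pool backing the zvol.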
3991 */ 3992 int 3993 zvol_check_dump_config(char *arg) 3994 { 3995 zpool_handle_t *zhp = NULL; 3996 nvlist_t *config, *nvroot; 3997 char *p, *volname; 3998 nvlist_t **top; 3999 uint_t toplevels; 4000 libzfs_handle_t *hdl; 4001 char errbuf[1024]; 4002 char poolname[ZPOOL_MAXNAMELEN]; 4003 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4004 int ret = 1; 4005 4006 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4007 return (-1); 4008 } 4009 4010 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4011 "dump is not supported on device '%s'"), arg); 4012 4013 if ((hdl = libzfs_init()) == NULL) 4014 return (1); 4015 libzfs_print_on_error(hdl, B_TRUE); 4016 4017 volname = arg + pathlen; 4018 4019 /* check the configuration of the pool */ 4020 if ((p = strchr(volname, '/')) == NULL) { 4021 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4022 "malformed dataset name")); 4023 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4024 return (1); 4025 } else if (p - volname >= ZFS_MAXNAMELEN) { 4026 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4027 "dataset name is too long")); 4028 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4029 return (1); 4030 } else { 4031 (void) strncpy(poolname, volname, p - volname); 4032 poolname[p - volname] = '\0'; 4033 } 4034 4035 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4036 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4037 "could not open pool '%s'"), poolname); 4038 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4039 goto out; 4040 } 4041 config = zpool_get_config(zhp, NULL); 4042 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4043 &nvroot) != 0) { 4044 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4045 "could not obtain vdev configuration for '%s'"), poolname); 4046 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4047 goto out; 4048 } 4049 4050 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4051 &top, &toplevels) == 0); 4052 4053 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4054 goto out; 4055 } 4056 ret = 0; 4057 4058 out: 4059 if (zhp) 4060 zpool_close(zhp); 4061 libzfs_fini(hdl); 4062 return (ret); 4063 } 4064
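/*
 * Usage sketch (illustrative only, not compiled as part of libzfs): a
 * consumer that wants to online a device and expand it to use any newly
 * available space might combine the routines above roughly as follows.
 * The pool and device names are hypothetical and error reporting is
 * abbreviated.
 *
 *	#include <libzfs.h>
 *
 *	int
 *	online_and_expand(const char *pool, const char *dev)
 *	{
 *		libzfs_handle_t *hdl;
 *		zpool_handle_t *zhp;
 *		vdev_state_t newstate;
 *		int err = -1;
 *
 *		if ((hdl = libzfs_init()) == NULL)
 *			return (-1);
 *		if ((zhp = zpool_open(hdl, pool)) != NULL) {
 *			err = zpool_vdev_online(zhp, dev,
 *			    ZFS_ONLINE_EXPAND, &newstate);
 *			zpool_close(zhp);
 *		}
 *		libzfs_fini(hdl);
 *		return (err);
 *	}
 */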