/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#define	BACKUP_SLICE	"s2"

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}
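
/*
 * Illustrative note (not from the original source): the zpool_props
 * nvlist populated above nests one sub-nvlist per property, keyed by
 * property name. Schematically:
 *
 *	"comment" -> {
 *		ZPROP_SOURCE -> 1	(uint64, a zprop_source_t value)
 *		ZPROP_VALUE  -> "..."	(string or uint64, by prop type)
 *	}
 *
 * The accessors below unpack this layout.
 */
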
static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}
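
/*
 * Example (illustrative sketch, not part of the library): mapping a
 * vdev's state to its display string, given a vdev_stat_t 'vs' obtained
 * from the ZPOOL_CONFIG_VDEV_STATS array of a config nvlist:
 *
 *	const char *state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
 *	(void) printf("%s\n", state);		prints e.g. "ONLINE"
 */
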
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;
		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
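
/*
 * Example (illustrative, assumes an open handle 'zhp'): reading a pool
 * property into a caller-supplied buffer with zpool_get_prop():
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("health: %s\n", buf);
 *
 * Passing B_TRUE for 'literal' suppresses the zfs_nicenum()/percentage
 * formatting of numeric properties such as ZPOOL_PROP_SIZE.
 */
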
/*
 * Check that the bootfs name carries the same pool name as the pool on
 * which it is being set; 'bootfs' is assumed to be a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZFS_MAX_DATASET_NAME_LEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value must be a dataset name,
			 * and the dataset must reside in the pool on which
			 * the property is being set.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		default:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s'(%d) not defined"), propname, prop);
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
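
/*
 * Example (illustrative, assumes an open handle 'zhp' and libzfs handle
 * 'g_zfs'): setting a pool property by name. The value is validated and
 * parsed by zpool_valid_proplist() before the ioctl is issued:
 *
 *	if (zpool_set_prop(zhp, "comment", "rack 12, shelf 3") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(g_zfs));
 */
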
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (int i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zfs_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
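
/*
 * Example (illustrative, assumes an open handle 'zhp'): querying feature
 * state through the feature@ property namespace handled above:
 *
 *	char state[64];
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		(void) printf("async_destroy: %s\n", state);
 *
 * On success 'state' holds "disabled", "enabled" or "active"; unknown
 * features return ENOTSUP and set the buffer to "-".
 */
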
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

/*
 * Validate the given pool name, optionally recording an extended error
 * message in 'hdl' (via zfs_error_aux()).
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
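
/*
 * Example (illustrative, assumes a libzfs handle 'g_zfs'): the canfail
 * variant still returns a handle for a FAULTED pool, which lets callers
 * that must inspect such pools examine them before deciding what to do:
 *
 *	zpool_handle_t *zhp;
 *	if ((zhp = zpool_open_canfail(g_zfs, "tank")) != NULL) {
 *		if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL)
 *			(void) printf("pool is faulted\n");
 *		zpool_close(zhp);
 *	}
 */
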
/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
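
/*
 * Example (illustrative sketch of the nvroot layout consumed by
 * zpool_create() above and zpool_add() below; real callers build it via
 * the zpool(1M) vdev-parsing code): a single-disk root vdev tree:
 *
 *	nvlist_t *disk, *nvroot;
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c0t0d0s0") == 0);
 *	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *	ret = zpool_create(g_zfs, "tank", nvroot, NULL, NULL);
 */
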
/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
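
/*
 * Example (illustrative, assumes an open handle 'zhp'): a plain export
 * versus a hard-forced one; 'log_str' is recorded in the pool history:
 *
 *	(void) zpool_export(zhp, B_FALSE, "export tank");
 *	(void) zpool_export_force(zhp, "export -F tank");
 */
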
/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, 0, &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded", loss);
"Would discard" : "Discarded", loss); 1469 (void) printf(dgettext(TEXT_DOMAIN, 1470 "seconds of transactions.\n")); 1471 } 1472 } 1473 } 1474 1475 void 1476 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1477 nvlist_t *config) 1478 { 1479 nvlist_t *nv = NULL; 1480 int64_t loss = -1; 1481 uint64_t edata = UINT64_MAX; 1482 uint64_t rewindto; 1483 struct tm t; 1484 char timestr[128]; 1485 1486 if (!hdl->libzfs_printerr) 1487 return; 1488 1489 if (reason >= 0) 1490 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1491 else 1492 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1493 1494 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1495 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1496 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 || 1497 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1498 goto no_info; 1499 1500 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1501 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1502 &edata); 1503 1504 (void) printf(dgettext(TEXT_DOMAIN, 1505 "Recovery is possible, but will result in some data loss.\n")); 1506 1507 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1508 strftime(timestr, 128, 0, &t) != 0) { 1509 (void) printf(dgettext(TEXT_DOMAIN, 1510 "\tReturning the pool to its state as of %s\n" 1511 "\tshould correct the problem. "), 1512 timestr); 1513 } else { 1514 (void) printf(dgettext(TEXT_DOMAIN, 1515 "\tReverting the pool to an earlier state " 1516 "should correct the problem.\n\t")); 1517 } 1518 1519 if (loss > 120) { 1520 (void) printf(dgettext(TEXT_DOMAIN, 1521 "Approximately %lld minutes of data\n" 1522 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1523 } else if (loss > 0) { 1524 (void) printf(dgettext(TEXT_DOMAIN, 1525 "Approximately %lld seconds of data\n" 1526 "\tmust be discarded, irreversibly. "), loss); 1527 } 1528 if (edata != 0 && edata != UINT64_MAX) { 1529 if (edata == 1) { 1530 (void) printf(dgettext(TEXT_DOMAIN, 1531 "After rewind, at least\n" 1532 "\tone persistent user-data error will remain. ")); 1533 } else { 1534 (void) printf(dgettext(TEXT_DOMAIN, 1535 "After rewind, several\n" 1536 "\tpersistent user-data errors will remain. ")); 1537 } 1538 } 1539 (void) printf(dgettext(TEXT_DOMAIN, 1540 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1541 reason >= 0 ? "clear" : "import", name); 1542 1543 (void) printf(dgettext(TEXT_DOMAIN, 1544 "A scrub of the pool\n" 1545 "\tis strongly recommended after recovery.\n")); 1546 return; 1547 1548 no_info: 1549 (void) printf(dgettext(TEXT_DOMAIN, 1550 "Destroy and re-create the pool from\n\ta backup source.\n")); 1551 } 1552 1553 /* 1554 * zpool_import() is a contracted interface. Should be kept the same 1555 * if possible. 1556 * 1557 * Applications should use zpool_import_props() to import a pool with 1558 * new properties value to be set. 
/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}
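
/*
 * Example (illustrative, assumes a libzfs handle 'g_zfs' and a 'config'
 * nvlist obtained from pool discovery, e.g. zpool_find_import()):
 * importing under an alternate root. zpool_import() fills in the
 * altroot and cachefile properties for this common case:
 *
 *	if (zpool_import(g_zfs, config, NULL, "/a") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(g_zfs));
 */
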
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg.
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;
		case ENAMETOOLONG:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
			break;
		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
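
/*
 * Example (illustrative, assumes an open handle 'zhp'): starting and
 * cancelling a scrub through zpool_scan():
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB);	start a scrub
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE);		cancel it
 */
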
/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string. Users of this are expected to do their own
 * verification of the s# part.
 */
#define	CTD_CHECK(str)	(str && str[0] == 'c' && isdigit(str[1]))

/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 * and the like.
 */
static int
ctd_check_path(char *str)
{
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)
				;
		}
		str = tmp + 1;
	}
	return (CTD_CHECK(str));
}

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "s0" or "s0/old". The "s0" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				int slen = strlen(srchval);
				int vlen = strlen(val);

				if (slen != vlen - 2)
					break;

				/*
				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.
				 */
				ASSERT(vlen >= 6);

				/*
				 * strings identical except trailing "s0"
				 */
				if (strcmp(&val[vlen - 2], "s0") == 0 &&
				    strncmp(srchval, val, slen) == 0)
					return (nv);

				/*
				 * strings identical except trailing "s0/old"
				 */
				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)
					return (nv);

				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}
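
/*
 * Illustrative note (not from the original source): the 'search' nvlist
 * passed to vdev_to_nvlist_iter() carries exactly one pair, and the key
 * selects the match style:
 *
 *	{ ZPOOL_CONFIG_GUID -> <uint64 guid> }		match by vdev guid
 *	{ ZPOOL_CONFIG_PATH -> "/dev/dsk/c0t0d0s0" }	match by device path
 *	{ ZPOOL_CONFIG_TYPE -> "mirror-0" }		match top-level vdev
 *
 * The finders below build these nvlists for their callers.
 */
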
2122  */
2123 nvlist_t *
2124 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2125     boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2126 {
2127     nvlist_t *search, *nvroot, *ret;
2128
2129     verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2130     verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2131
2132     verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2133         &nvroot) == 0);
2134
2135     *avail_spare = B_FALSE;
2136     *l2cache = B_FALSE;
2137     if (log != NULL)
2138         *log = B_FALSE;
2139     ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2140     nvlist_free(search);
2141
2142     return (ret);
2143 }
2144
2145 /*
2146  * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2147  */
2148 boolean_t
2149 zpool_vdev_is_interior(const char *name)
2150 {
2151     if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2152         strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2153         return (B_TRUE);
2154     return (B_FALSE);
2155 }
2156
2157 nvlist_t *
2158 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2159     boolean_t *l2cache, boolean_t *log)
2160 {
2161     char buf[MAXPATHLEN];
2162     char *end;
2163     nvlist_t *nvroot, *search, *ret;
2164     uint64_t guid;
2165
2166     verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2167
2168     guid = strtoull(path, &end, 10);
2169     if (guid != 0 && *end == '\0') {
2170         verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2171     } else if (zpool_vdev_is_interior(path)) {
2172         verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2173     } else if (path[0] != '/') {
2174         (void) snprintf(buf, sizeof (buf), "%s/%s", ZFS_DISK_ROOT,
2175             path);
2176         verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
2177     } else {
2178         verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2179     }
2180
2181     verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2182         &nvroot) == 0);
2183
2184     *avail_spare = B_FALSE;
2185     *l2cache = B_FALSE;
2186     if (log != NULL)
2187         *log = B_FALSE;
2188     ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2189     nvlist_free(search);
2190
2191     return (ret);
2192 }
2193
2194 static int
2195 vdev_online(nvlist_t *nv)
2196 {
2197     uint64_t ival;
2198
2199     if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2200         nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2201         nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2202         return (0);
2203
2204     return (1);
2205 }
2206
2207 /*
2208  * Helper function for zpool_get_physpaths().
2209  */
2210 static int
2211 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2212     size_t *bytes_written)
2213 {
2214     size_t bytes_left, pos, rsz;
2215     char *tmppath;
2216     const char *format;
2217
2218     if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2219         &tmppath) != 0)
2220         return (EZFS_NODEVICE);
2221
2222     pos = *bytes_written;
2223     bytes_left = physpath_size - pos;
2224     format = (pos == 0) ? "%s" : " %s";
2225
2226     rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2227     *bytes_written += rsz;
2228
2229     if (rsz >= bytes_left) {
2230         /* if physpath was not copied properly, clear it */
2231         if (bytes_left != 0) {
2232             physpath[pos] = 0;
2233         }
2234         return (EZFS_NOSPC);
2235     }
2236     return (0);
2237 }
2238
2239 static int
2240 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2241     size_t *rsz, boolean_t is_spare)
2242 {
2243     char *type;
2244     int ret;
2245
2246     if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2247         return (EZFS_INVALCONFIG);
2248
2249     if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2250         /*
2251          * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2252          * For a spare vdev, we only want to boot from the active
2253          * spare device.
2254          */
2255         if (is_spare) {
2256             uint64_t spare = 0;
2257             (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2258                 &spare);
2259             if (!spare)
2260                 return (EZFS_INVALCONFIG);
2261         }
2262
2263         if (vdev_online(nv)) {
2264             if ((ret = vdev_get_one_physpath(nv, physpath,
2265                 phypath_size, rsz)) != 0)
2266                 return (ret);
2267         }
2268     } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2269         strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2270         strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2271         (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2272         nvlist_t **child;
2273         uint_t count;
2274         int i, ret;
2275
2276         if (nvlist_lookup_nvlist_array(nv,
2277             ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2278             return (EZFS_INVALCONFIG);
2279
2280         for (i = 0; i < count; i++) {
2281             ret = vdev_get_physpaths(child[i], physpath,
2282                 phypath_size, rsz, is_spare);
2283             if (ret == EZFS_NOSPC)
2284                 return (ret);
2285         }
2286     }
2287
2288     return (EZFS_POOL_INVALARG);
2289 }
2290
2291 /*
2292  * Get phys_path for a root pool config.
2293  * Return 0 on success; non-zero on failure.
2294  */
2295 static int
2296 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2297 {
2298     size_t rsz;
2299     nvlist_t *vdev_root;
2300     nvlist_t **child;
2301     uint_t count;
2302     char *type;
2303
2304     rsz = 0;
2305
2306     if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2307         &vdev_root) != 0)
2308         return (EZFS_INVALCONFIG);
2309
2310     if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2311         nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2312         &child, &count) != 0)
2313         return (EZFS_INVALCONFIG);
2314
2315     /*
2316      * root pool can only have a single top-level vdev.
2317      */
2318     if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2319         return (EZFS_POOL_INVALARG);
2320
2321     (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2322         B_FALSE);
2323
2324     /* No online devices */
2325     if (rsz == 0)
2326         return (EZFS_NODEVICE);
2327
2328     return (0);
2329 }
2330
2331 /*
2332  * Get phys_path for a root pool
2333  * Return 0 on success; non-zero on failure.
2334  */
2335 int
2336 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2337 {
2338     return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2339         phypath_size));
2340 }
2341
2342 /*
2343  * If the device has been dynamically expanded then we need to relabel
2344  * the disk to use the new unallocated space.
2345  */
2346 static int
2347 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2348 {
2349     char path[MAXPATHLEN];
2350     char errbuf[1024];
2351     int fd, error;
2352     int (*_efi_use_whole_disk)(int);
2353
2354     if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2355         "efi_use_whole_disk")) == NULL)
2356         return (-1);
2357
2358     (void) snprintf(path, sizeof (path), "%s/%s", ZFS_RDISK_ROOT, name);
     (void) snprintf(errbuf, sizeof (errbuf),
         dgettext(TEXT_DOMAIN, "cannot relabel '%s'"), name);
2359
2360     if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2361         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2362             "relabel '%s': unable to open device"), name);
2363         return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2364     }
2365
2366     /*
2367      * It's possible that we might encounter an error if the device
2368      * does not have any unallocated space left. If so, we simply
2369      * ignore that error and continue on.
2370      */
2371     error = _efi_use_whole_disk(fd);
2372     (void) close(fd);
2373     if (error && error != VT_ENOSPC) {
2374         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2375             "relabel '%s': unable to read disk capacity"), name);
2376         return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2377     }
2378     return (0);
2379 }
2380
2381 /*
2382  * Bring the specified vdev online. The 'flags' parameter is a set of the
2383  * ZFS_ONLINE_* flags.
2384  */
2385 int
2386 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2387     vdev_state_t *newstate)
2388 {
2389     zfs_cmd_t zc = { 0 };
2390     char msg[1024];
2391     nvlist_t *tgt;
2392     boolean_t avail_spare, l2cache, islog;
2393     libzfs_handle_t *hdl = zhp->zpool_hdl;
2394
2395     if (flags & ZFS_ONLINE_EXPAND) {
2396         (void) snprintf(msg, sizeof (msg),
2397             dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2398     } else {
2399         (void) snprintf(msg, sizeof (msg),
2400             dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2401     }
2402
2403     (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2404     if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2405         &islog)) == NULL)
2406         return (zfs_error(hdl, EZFS_NODEVICE, msg));
2407
2408     verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2409
2410     if (avail_spare)
2411         return (zfs_error(hdl, EZFS_ISSPARE, msg));
2412
2413     if (flags & ZFS_ONLINE_EXPAND ||
2414         zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2415         char *pathname = NULL;
2416         uint64_t wholedisk = 0;
2417
2418         (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2419             &wholedisk);
2420         verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
2421             &pathname) == 0);
2422
2423         /*
2424          * XXX - L2ARC 1.0 devices can't support expansion.
2425          */
2426         if (l2cache) {
2427             zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2428                 "cannot expand cache devices"));
2429             return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2430         }
2431
2432         if (wholedisk) {
2433             pathname += strlen(ZFS_DISK_ROOT) + 1;
2434             (void) zpool_relabel_disk(hdl, pathname);
2435         }
2436     }
2437
2438     zc.zc_cookie = VDEV_STATE_ONLINE;
2439     zc.zc_obj = flags;
2440
2441     if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2442         if (errno == EINVAL) {
2443             zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2444                 "from this pool into a new one.
Use '%s' " 2445 "instead"), "zpool detach"); 2446 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2447 } 2448 return (zpool_standard_error(hdl, errno, msg)); 2449 } 2450 2451 *newstate = zc.zc_cookie; 2452 return (0); 2453 } 2454 2455 /* 2456 * Take the specified vdev offline 2457 */ 2458 int 2459 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2460 { 2461 zfs_cmd_t zc = { 0 }; 2462 char msg[1024]; 2463 nvlist_t *tgt; 2464 boolean_t avail_spare, l2cache; 2465 libzfs_handle_t *hdl = zhp->zpool_hdl; 2466 2467 (void) snprintf(msg, sizeof (msg), 2468 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2469 2470 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2471 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2472 NULL)) == NULL) 2473 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2474 2475 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2476 2477 if (avail_spare) 2478 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2479 2480 zc.zc_cookie = VDEV_STATE_OFFLINE; 2481 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0; 2482 2483 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2484 return (0); 2485 2486 switch (errno) { 2487 case EBUSY: 2488 2489 /* 2490 * There are no other replicas of this device. 2491 */ 2492 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2493 2494 case EEXIST: 2495 /* 2496 * The log device has unplayed logs 2497 */ 2498 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2499 2500 default: 2501 return (zpool_standard_error(hdl, errno, msg)); 2502 } 2503 } 2504 2505 /* 2506 * Mark the given vdev faulted. 2507 */ 2508 int 2509 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2510 { 2511 zfs_cmd_t zc = { 0 }; 2512 char msg[1024]; 2513 libzfs_handle_t *hdl = zhp->zpool_hdl; 2514 2515 (void) snprintf(msg, sizeof (msg), 2516 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2517 2518 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2519 zc.zc_guid = guid; 2520 zc.zc_cookie = VDEV_STATE_FAULTED; 2521 zc.zc_obj = aux; 2522 2523 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2524 return (0); 2525 2526 switch (errno) { 2527 case EBUSY: 2528 2529 /* 2530 * There are no other replicas of this device. 2531 */ 2532 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2533 2534 default: 2535 return (zpool_standard_error(hdl, errno, msg)); 2536 } 2537 2538 } 2539 2540 /* 2541 * Mark the given vdev degraded. 2542 */ 2543 int 2544 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2545 { 2546 zfs_cmd_t zc = { 0 }; 2547 char msg[1024]; 2548 libzfs_handle_t *hdl = zhp->zpool_hdl; 2549 2550 (void) snprintf(msg, sizeof (msg), 2551 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2552 2553 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2554 zc.zc_guid = guid; 2555 zc.zc_cookie = VDEV_STATE_DEGRADED; 2556 zc.zc_obj = aux; 2557 2558 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2559 return (0); 2560 2561 return (zpool_standard_error(hdl, errno, msg)); 2562 } 2563 2564 /* 2565 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2566 * a hot spare. 
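 *
 * For example (illustrative): a "spare" vdev created by an active
 * replacement has two children, child[0] the original device and
 * child[1] the hot spare, so is_replacing_spare(config_root, tgt, 1)
 * asks whether 'tgt' is the spare half of such a pair.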
2567 */ 2568 static boolean_t 2569 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2570 { 2571 nvlist_t **child; 2572 uint_t c, children; 2573 char *type; 2574 2575 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2576 &children) == 0) { 2577 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2578 &type) == 0); 2579 2580 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2581 children == 2 && child[which] == tgt) 2582 return (B_TRUE); 2583 2584 for (c = 0; c < children; c++) 2585 if (is_replacing_spare(child[c], tgt, which)) 2586 return (B_TRUE); 2587 } 2588 2589 return (B_FALSE); 2590 } 2591 2592 /* 2593 * Attach new_disk (fully described by nvroot) to old_disk. 2594 * If 'replacing' is specified, the new disk will replace the old one. 2595 */ 2596 int 2597 zpool_vdev_attach(zpool_handle_t *zhp, 2598 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2599 { 2600 zfs_cmd_t zc = { 0 }; 2601 char msg[1024]; 2602 int ret; 2603 nvlist_t *tgt; 2604 boolean_t avail_spare, l2cache, islog; 2605 uint64_t val; 2606 char *newname; 2607 nvlist_t **child; 2608 uint_t children; 2609 nvlist_t *config_root; 2610 libzfs_handle_t *hdl = zhp->zpool_hdl; 2611 boolean_t rootpool = zpool_is_bootable(zhp); 2612 2613 if (replacing) 2614 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2615 "cannot replace %s with %s"), old_disk, new_disk); 2616 else 2617 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2618 "cannot attach %s to %s"), new_disk, old_disk); 2619 2620 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2621 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2622 &islog)) == 0) 2623 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2624 2625 if (avail_spare) 2626 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2627 2628 if (l2cache) 2629 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2630 2631 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2632 zc.zc_cookie = replacing; 2633 2634 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2635 &child, &children) != 0 || children != 1) { 2636 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2637 "new device must be a single disk")); 2638 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2639 } 2640 2641 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2642 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2643 2644 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2645 return (-1); 2646 2647 /* 2648 * If the target is a hot spare that has been swapped in, we can only 2649 * replace it with another hot spare. 2650 */ 2651 if (replacing && 2652 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2653 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2654 NULL) == NULL || !avail_spare) && 2655 is_replacing_spare(config_root, tgt, 1)) { 2656 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2657 "can only be replaced by another hot spare")); 2658 free(newname); 2659 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2660 } 2661 2662 free(newname); 2663 2664 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2665 return (-1); 2666 2667 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2668 2669 zcmd_free_nvlists(&zc); 2670 2671 if (ret == 0) { 2672 if (rootpool) { 2673 /* 2674 * XXX need a better way to prevent user from 2675 * booting up a half-baked vdev. 
2676 */ 2677 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2678 "sure to wait until resilver is done " 2679 "before rebooting.\n")); 2680 } 2681 return (0); 2682 } 2683 2684 switch (errno) { 2685 case ENOTSUP: 2686 /* 2687 * Can't attach to or replace this type of vdev. 2688 */ 2689 if (replacing) { 2690 uint64_t version = zpool_get_prop_int(zhp, 2691 ZPOOL_PROP_VERSION, NULL); 2692 2693 if (islog) 2694 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2695 "cannot replace a log with a spare")); 2696 else if (version >= SPA_VERSION_MULTI_REPLACE) 2697 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2698 "already in replacing/spare config; wait " 2699 "for completion or use 'zpool detach'")); 2700 else 2701 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2702 "cannot replace a replacing device")); 2703 } else { 2704 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2705 "can only attach to mirrors and top-level " 2706 "disks")); 2707 } 2708 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2709 break; 2710 2711 case EINVAL: 2712 /* 2713 * The new device must be a single disk. 2714 */ 2715 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2716 "new device must be a single disk")); 2717 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2718 break; 2719 2720 case EBUSY: 2721 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2722 new_disk); 2723 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2724 break; 2725 2726 case EOVERFLOW: 2727 /* 2728 * The new device is too small. 2729 */ 2730 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2731 "device is too small")); 2732 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2733 break; 2734 2735 case EDOM: 2736 /* 2737 * The new device has a different alignment requirement. 2738 */ 2739 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2740 "devices have different sector alignment")); 2741 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2742 break; 2743 2744 case ENAMETOOLONG: 2745 /* 2746 * The resulting top-level vdev spec won't fit in the label. 2747 */ 2748 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2749 break; 2750 2751 default: 2752 (void) zpool_standard_error(hdl, errno, msg); 2753 } 2754 2755 return (-1); 2756 } 2757 2758 /* 2759 * Detach the specified device. 2760 */ 2761 int 2762 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2763 { 2764 zfs_cmd_t zc = { 0 }; 2765 char msg[1024]; 2766 nvlist_t *tgt; 2767 boolean_t avail_spare, l2cache; 2768 libzfs_handle_t *hdl = zhp->zpool_hdl; 2769 2770 (void) snprintf(msg, sizeof (msg), 2771 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2772 2773 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2774 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2775 NULL)) == 0) 2776 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2777 2778 if (avail_spare) 2779 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2780 2781 if (l2cache) 2782 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2783 2784 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2785 2786 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2787 return (0); 2788 2789 switch (errno) { 2790 2791 case ENOTSUP: 2792 /* 2793 * Can't detach from this type of vdev. 2794 */ 2795 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2796 "applicable to mirror and replacing vdevs")); 2797 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2798 break; 2799 2800 case EBUSY: 2801 /* 2802 * There are no other replicas of this device. 
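     * (For instance -- illustrative -- detaching the only healthy
     * side of a mirror whose other half is faulted fails this way.)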
2803 */ 2804 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2805 break; 2806 2807 default: 2808 (void) zpool_standard_error(hdl, errno, msg); 2809 } 2810 2811 return (-1); 2812 } 2813 2814 /* 2815 * Find a mirror vdev in the source nvlist. 2816 * 2817 * The mchild array contains a list of disks in one of the top-level mirrors 2818 * of the source pool. The schild array contains a list of disks that the 2819 * user specified on the command line. We loop over the mchild array to 2820 * see if any entry in the schild array matches. 2821 * 2822 * If a disk in the mchild array is found in the schild array, we return 2823 * the index of that entry. Otherwise we return -1. 2824 */ 2825 static int 2826 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2827 nvlist_t **schild, uint_t schildren) 2828 { 2829 uint_t mc; 2830 2831 for (mc = 0; mc < mchildren; mc++) { 2832 uint_t sc; 2833 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2834 mchild[mc], B_FALSE); 2835 2836 for (sc = 0; sc < schildren; sc++) { 2837 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2838 schild[sc], B_FALSE); 2839 boolean_t result = (strcmp(mpath, spath) == 0); 2840 2841 free(spath); 2842 if (result) { 2843 free(mpath); 2844 return (mc); 2845 } 2846 } 2847 2848 free(mpath); 2849 } 2850 2851 return (-1); 2852 } 2853 2854 /* 2855 * Split a mirror pool. If newroot points to null, then a new nvlist 2856 * is generated and it is the responsibility of the caller to free it. 2857 */ 2858 int 2859 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2860 nvlist_t *props, splitflags_t flags) 2861 { 2862 zfs_cmd_t zc = { 0 }; 2863 char msg[1024]; 2864 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2865 nvlist_t **varray = NULL, *zc_props = NULL; 2866 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2867 libzfs_handle_t *hdl = zhp->zpool_hdl; 2868 uint64_t vers; 2869 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2870 int retval = 0; 2871 2872 (void) snprintf(msg, sizeof (msg), 2873 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2874 2875 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2876 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2877 2878 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2879 (void) fprintf(stderr, gettext("Internal error: unable to " 2880 "retrieve pool configuration\n")); 2881 return (-1); 2882 } 2883 2884 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2885 == 0); 2886 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2887 2888 if (props) { 2889 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2890 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2891 props, vers, flags, msg)) == NULL) 2892 return (-1); 2893 } 2894 2895 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2896 &children) != 0) { 2897 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2898 "Source pool is missing vdev tree")); 2899 nvlist_free(zc_props); 2900 return (-1); 2901 } 2902 2903 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2904 vcount = 0; 2905 2906 if (*newroot == NULL || 2907 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2908 &newchild, &newchildren) != 0) 2909 newchildren = 0; 2910 2911 for (c = 0; c < children; c++) { 2912 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2913 char *type; 2914 nvlist_t **mchild, *vdev; 2915 uint_t mchildren; 2916 int entry; 2917 2918 /* 2919 * Unlike cache & spares, slogs are stored in the 2920 * 
ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2921 */ 2922 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2923 &is_log); 2924 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2925 &is_hole); 2926 if (is_log || is_hole) { 2927 /* 2928 * Create a hole vdev and put it in the config. 2929 */ 2930 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2931 goto out; 2932 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2933 VDEV_TYPE_HOLE) != 0) 2934 goto out; 2935 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2936 1) != 0) 2937 goto out; 2938 if (lastlog == 0) 2939 lastlog = vcount; 2940 varray[vcount++] = vdev; 2941 continue; 2942 } 2943 lastlog = 0; 2944 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2945 == 0); 2946 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2947 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2948 "Source pool must be composed only of mirrors\n")); 2949 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2950 goto out; 2951 } 2952 2953 verify(nvlist_lookup_nvlist_array(child[c], 2954 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2955 2956 /* find or add an entry for this top-level vdev */ 2957 if (newchildren > 0 && 2958 (entry = find_vdev_entry(zhp, mchild, mchildren, 2959 newchild, newchildren)) >= 0) { 2960 /* We found a disk that the user specified. */ 2961 vdev = mchild[entry]; 2962 ++found; 2963 } else { 2964 /* User didn't specify a disk for this vdev. */ 2965 vdev = mchild[mchildren - 1]; 2966 } 2967 2968 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 2969 goto out; 2970 } 2971 2972 /* did we find every disk the user specified? */ 2973 if (found != newchildren) { 2974 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 2975 "include at most one disk from each mirror")); 2976 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2977 goto out; 2978 } 2979 2980 /* Prepare the nvlist for populating. */ 2981 if (*newroot == NULL) { 2982 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 2983 goto out; 2984 freelist = B_TRUE; 2985 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 2986 VDEV_TYPE_ROOT) != 0) 2987 goto out; 2988 } else { 2989 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 2990 } 2991 2992 /* Add all the children we found */ 2993 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 2994 lastlog == 0 ? vcount : lastlog) != 0) 2995 goto out; 2996 2997 /* 2998 * If we're just doing a dry run, exit now with success. 2999 */ 3000 if (flags.dryrun) { 3001 memory_err = B_FALSE; 3002 freelist = B_FALSE; 3003 goto out; 3004 } 3005 3006 /* now build up the config list & call the ioctl */ 3007 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3008 goto out; 3009 3010 if (nvlist_add_nvlist(newconfig, 3011 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3012 nvlist_add_string(newconfig, 3013 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3014 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3015 goto out; 3016 3017 /* 3018 * The new pool is automatically part of the namespace unless we 3019 * explicitly export it. 
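 *
 * (Sketch, with a hypothetical pool name: without flags.import set,
 * the kernel is asked for ZPOOL_EXPORT_AFTER_SPLIT, so the caller
 * later runs 'zpool import newpool' to bring the split half online.)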
3020  */
3021     if (!flags.import)
3022         zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3023     (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3024     (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3025     if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3026         goto out;
3027     if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3028         goto out;
3029
3030     if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3031         retval = zpool_standard_error(hdl, errno, msg);
3032         goto out;
3033     }
3034
3035     freelist = B_FALSE;
3036     memory_err = B_FALSE;
3037
3038 out:
3039     if (varray != NULL) {
3040         int v;
3041
3042         for (v = 0; v < vcount; v++)
3043             nvlist_free(varray[v]);
3044         free(varray);
3045     }
3046     zcmd_free_nvlists(&zc);
3047     nvlist_free(zc_props);
3048     nvlist_free(newconfig);
3049     if (freelist) {
3050         nvlist_free(*newroot);
3051         *newroot = NULL;
3052     }
3053
3054     if (retval != 0)
3055         return (retval);
3056
3057     if (memory_err)
3058         return (no_memory(hdl));
3059
3060     return (0);
3061 }
3062
3063 /*
3064  * Remove the given device. Currently, this is supported only for hot spares
3065  * and level 2 cache devices.
3066  */
3067 int
3068 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3069 {
3070     zfs_cmd_t zc = { 0 };
3071     char msg[1024];
3072     nvlist_t *tgt;
3073     boolean_t avail_spare, l2cache, islog;
3074     libzfs_handle_t *hdl = zhp->zpool_hdl;
3075     uint64_t version;
3076
3077     (void) snprintf(msg, sizeof (msg),
3078         dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3079
3080     (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3081     if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3082         &islog)) == 0)
3083         return (zfs_error(hdl, EZFS_NODEVICE, msg));
3084     /*
3085      * XXX - this should just go away.
3086      */
3087     if (!avail_spare && !l2cache && !islog) {
3088         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3089             "only inactive hot spares, cache, top-level, "
3090             "or log devices can be removed"));
3091         return (zfs_error(hdl, EZFS_NODEVICE, msg));
3092     }
3093
3094     version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3095     if (islog && version < SPA_VERSION_HOLES) {
3096         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3097             "pool must be upgraded to support log removal"));
3098         return (zfs_error(hdl, EZFS_BADVERSION, msg));
3099     }
3100
3101     verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3102
3103     if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3104         return (0);
3105
3106     return (zpool_standard_error(hdl, errno, msg));
3107 }
3108
3109 /*
3110  * Clear the errors for the pool, or the particular device if specified.
3111  */
3112 int
3113 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3114 {
3115     zfs_cmd_t zc = { 0 };
3116     char msg[1024];
3117     nvlist_t *tgt;
3118     zpool_rewind_policy_t policy;
3119     boolean_t avail_spare, l2cache;
3120     libzfs_handle_t *hdl = zhp->zpool_hdl;
3121     nvlist_t *nvi = NULL;
3122     int error;
3123
3124     if (path)
3125         (void) snprintf(msg, sizeof (msg),
3126             dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3127             path);
3128     else
3129         (void) snprintf(msg, sizeof (msg),
3130             dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3131             zhp->zpool_name);
3132
3133     (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3134     if (path) {
3135         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3136             &l2cache, NULL)) == 0)
3137             return (zfs_error(hdl, EZFS_NODEVICE, msg));
3138
3139         /*
3140          * Don't allow error clearing for hot spares. Do allow
3141          * error clearing for l2cache devices.
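         *
         * A usage sketch (device name illustrative):
         * zpool_clear(zhp, "c0t1d0", NULL) clears one vdev, while
         * zpool_clear(zhp, NULL, NULL) clears every device in the
         * pool.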
3142          */
3143         if (avail_spare)
3144             return (zfs_error(hdl, EZFS_ISSPARE, msg));
3145
3146         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3147             &zc.zc_guid) == 0);
3148     }
3149
3150     zpool_get_rewind_policy(rewindnvl, &policy);
3151     zc.zc_cookie = policy.zrp_request;
3152
3153     if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3154         return (-1);
3155
3156     if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3157         return (-1);
3158
3159     while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3160         errno == ENOMEM) {
3161         if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3162             zcmd_free_nvlists(&zc);
3163             return (-1);
3164         }
3165     }
3166
3167     if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3168         errno != EPERM && errno != EACCES)) {
3169         if (policy.zrp_request &
3170             (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3171             (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3172             zpool_rewind_exclaim(hdl, zc.zc_name,
3173                 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3174                 nvi);
3175             nvlist_free(nvi);
3176         }
3177         zcmd_free_nvlists(&zc);
3178         return (0);
3179     }
3180
3181     zcmd_free_nvlists(&zc);
3182     return (zpool_standard_error(hdl, errno, msg));
3183 }
3184
3185 /*
3186  * Similar to zpool_clear(), but takes a GUID (used by fmd).
3187  */
3188 int
3189 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3190 {
3191     zfs_cmd_t zc = { 0 };
3192     char msg[1024];
3193     libzfs_handle_t *hdl = zhp->zpool_hdl;
3194
3195     (void) snprintf(msg, sizeof (msg),
3196         dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3197         guid);
3198
3199     (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3200     zc.zc_guid = guid;
3201     zc.zc_cookie = ZPOOL_NO_REWIND;
3202
3203     if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3204         return (0);
3205
3206     return (zpool_standard_error(hdl, errno, msg));
3207 }
3208
3209 /*
3210  * Change the GUID for a pool.
3211  */
3212 int
3213 zpool_reguid(zpool_handle_t *zhp)
3214 {
3215     char msg[1024];
3216     libzfs_handle_t *hdl = zhp->zpool_hdl;
3217     zfs_cmd_t zc = { 0 };
3218
3219     (void) snprintf(msg, sizeof (msg),
3220         dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3221
3222     (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3223     if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3224         return (0);
3225
3226     return (zpool_standard_error(hdl, errno, msg));
3227 }
3228
3229 /*
3230  * Reopen the pool.
3231  */
3232 int
3233 zpool_reopen(zpool_handle_t *zhp)
3234 {
3235     zfs_cmd_t zc = { 0 };
3236     char msg[1024];
3237     libzfs_handle_t *hdl = zhp->zpool_hdl;
3238
3239     (void) snprintf(msg, sizeof (msg),
3240         dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3241         zhp->zpool_name);
3242
3243     (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3244     if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3245         return (0);
3246     return (zpool_standard_error(hdl, errno, msg));
3247 }
3248
3249 /*
3250  * Convert from a devid string to a path.
3251  */
3252 static char *
3253 devid_to_path(char *devid_str)
3254 {
3255     ddi_devid_t devid;
3256     char *minor;
3257     char *path;
3258     devid_nmlist_t *list = NULL;
3259     int ret;
3260
3261     if (devid_str_decode(devid_str, &devid, &minor) != 0)
3262         return (NULL);
3263
3264     ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3265
3266     devid_str_free(minor);
3267     devid_free(devid);
3268
3269     if (ret != 0)
3270         return (NULL);
3271
3272     /*
3273      * In case the strdup() fails, we will just return NULL below.
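     *
     * (Sketch with illustrative values: a devid string such as
     * "id1,sd@SATA_____disk/a" may resolve through
     * devid_deviceid_to_nmlist() to "/dev/dsk/c0t0d0s0".)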
3274 */ 3275 path = strdup(list[0].devname); 3276 3277 devid_free_nmlist(list); 3278 3279 return (path); 3280 } 3281 3282 /* 3283 * Convert from a path to a devid string. 3284 */ 3285 static char * 3286 path_to_devid(const char *path) 3287 { 3288 int fd; 3289 ddi_devid_t devid; 3290 char *minor, *ret; 3291 3292 if ((fd = open(path, O_RDONLY)) < 0) 3293 return (NULL); 3294 3295 minor = NULL; 3296 ret = NULL; 3297 if (devid_get(fd, &devid) == 0) { 3298 if (devid_get_minor_name(fd, &minor) == 0) 3299 ret = devid_str_encode(devid, minor); 3300 if (minor != NULL) 3301 devid_str_free(minor); 3302 devid_free(devid); 3303 } 3304 (void) close(fd); 3305 3306 return (ret); 3307 } 3308 3309 /* 3310 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3311 * ignore any failure here, since a common case is for an unprivileged user to 3312 * type 'zpool status', and we'll display the correct information anyway. 3313 */ 3314 static void 3315 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3316 { 3317 zfs_cmd_t zc = { 0 }; 3318 3319 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3320 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3321 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3322 &zc.zc_guid) == 0); 3323 3324 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3325 } 3326 3327 /* 3328 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3329 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3330 * We also check if this is a whole disk, in which case we strip off the 3331 * trailing 's0' slice name. 3332 * 3333 * This routine is also responsible for identifying when disks have been 3334 * reconfigured in a new location. The kernel will have opened the device by 3335 * devid, but the path will still refer to the old location. To catch this, we 3336 * first do a path -> devid translation (which is fast for the common case). If 3337 * the devid matches, we're done. If not, we do a reverse devid -> path 3338 * translation and issue the appropriate ioctl() to update the path of the vdev. 3339 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3340 * of these checks. 3341 */ 3342 char * 3343 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3344 boolean_t verbose) 3345 { 3346 char *path, *devid; 3347 uint64_t value; 3348 char buf[64]; 3349 vdev_stat_t *vs; 3350 uint_t vsc; 3351 3352 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 3353 &value) == 0) { 3354 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3355 &value) == 0); 3356 (void) snprintf(buf, sizeof (buf), "%llu", 3357 (u_longlong_t)value); 3358 path = buf; 3359 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 3360 3361 /* 3362 * If the device is dead (faulted, offline, etc) then don't 3363 * bother opening it. Otherwise we may be forcing the user to 3364 * open a misbehaving device, which can have undesirable 3365 * effects. 3366 */ 3367 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3368 (uint64_t **)&vs, &vsc) != 0 || 3369 vs->vs_state >= VDEV_STATE_DEGRADED) && 3370 zhp != NULL && 3371 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3372 /* 3373 * Determine if the current path is correct. 
3374 */ 3375 char *newdevid = path_to_devid(path); 3376 3377 if (newdevid == NULL || 3378 strcmp(devid, newdevid) != 0) { 3379 char *newpath; 3380 3381 if ((newpath = devid_to_path(devid)) != NULL) { 3382 /* 3383 * Update the path appropriately. 3384 */ 3385 set_path(zhp, nv, newpath); 3386 if (nvlist_add_string(nv, 3387 ZPOOL_CONFIG_PATH, newpath) == 0) 3388 verify(nvlist_lookup_string(nv, 3389 ZPOOL_CONFIG_PATH, 3390 &path) == 0); 3391 free(newpath); 3392 } 3393 } 3394 3395 if (newdevid) 3396 devid_str_free(newdevid); 3397 } 3398 3399 if (strncmp(path, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0) 3400 path += strlen(ZFS_DISK_ROOTD); 3401 3402 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3403 &value) == 0 && value) { 3404 int pathlen = strlen(path); 3405 char *tmp = zfs_strdup(hdl, path); 3406 3407 /* 3408 * If it starts with c#, and ends with "s0", chop 3409 * the "s0" off, or if it ends with "s0/old", remove 3410 * the "s0" from the middle. 3411 */ 3412 if (CTD_CHECK(tmp)) { 3413 if (strcmp(&tmp[pathlen - 2], "s0") == 0) { 3414 tmp[pathlen - 2] = '\0'; 3415 } else if (pathlen > 6 && 3416 strcmp(&tmp[pathlen - 6], "s0/old") == 0) { 3417 (void) strcpy(&tmp[pathlen - 6], 3418 "/old"); 3419 } 3420 } 3421 return (tmp); 3422 } 3423 } else { 3424 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3425 3426 /* 3427 * If it's a raidz device, we need to stick in the parity level. 3428 */ 3429 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3430 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3431 &value) == 0); 3432 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3433 (u_longlong_t)value); 3434 path = buf; 3435 } 3436 3437 /* 3438 * We identify each top-level vdev by using a <type-id> 3439 * naming convention. 3440 */ 3441 if (verbose) { 3442 uint64_t id; 3443 3444 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3445 &id) == 0); 3446 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3447 (u_longlong_t)id); 3448 path = buf; 3449 } 3450 } 3451 3452 return (zfs_strdup(hdl, path)); 3453 } 3454 3455 static int 3456 zbookmark_mem_compare(const void *a, const void *b) 3457 { 3458 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 3459 } 3460 3461 /* 3462 * Retrieve the persistent error log, uniquify the members, and return to the 3463 * caller. 3464 */ 3465 int 3466 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3467 { 3468 zfs_cmd_t zc = { 0 }; 3469 uint64_t count; 3470 zbookmark_phys_t *zb = NULL; 3471 int i; 3472 3473 /* 3474 * Retrieve the raw error list from the kernel. If the number of errors 3475 * has increased, allocate more space and continue until we get the 3476 * entire list. 3477 */ 3478 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3479 &count) == 0); 3480 if (count == 0) 3481 return (0); 3482 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3483 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL) 3484 return (-1); 3485 zc.zc_nvlist_dst_size = count; 3486 (void) strcpy(zc.zc_name, zhp->zpool_name); 3487 for (;;) { 3488 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3489 &zc) != 0) { 3490 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3491 if (errno == ENOMEM) { 3492 void *dst; 3493 3494 count = zc.zc_nvlist_dst_size; 3495 dst = zfs_alloc(zhp->zpool_hdl, count * 3496 sizeof (zbookmark_phys_t)); 3497 if (dst == NULL) 3498 return (-1); 3499 zc.zc_nvlist_dst = (uintptr_t)dst; 3500 } else { 3501 return (-1); 3502 } 3503 } else { 3504 break; 3505 } 3506 } 3507 3508 /* 3509 * Sort the resulting bookmarks. 
This is a little confusing due to the
3510  * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3511  * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3512  * _not_ copied as part of the process. So we point the start of our
3513  * array appropriately and decrement the total number of elements.
3514  */
3515     zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3516         zc.zc_nvlist_dst_size;
3517     count -= zc.zc_nvlist_dst_size;
3518
3519     qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3520
3521     verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3522
3523     /*
3524      * Fill in the nverrlistp with nvlists of dataset and object numbers.
3525      */
3526     for (i = 0; i < count; i++) {
3527         nvlist_t *nv;
3528
3529         /* ignoring zb_blkid and zb_level for now */
3530         if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3531             zb[i-1].zb_object == zb[i].zb_object)
3532             continue;
3533
3534         if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3535             goto nomem;
3536         if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3537             zb[i].zb_objset) != 0) {
3538             nvlist_free(nv);
3539             goto nomem;
3540         }
3541         if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3542             zb[i].zb_object) != 0) {
3543             nvlist_free(nv);
3544             goto nomem;
3545         }
3546         if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3547             nvlist_free(nv);
3548             goto nomem;
3549         }
3550         nvlist_free(nv);
3551     }
3552
3553     free((void *)(uintptr_t)zc.zc_nvlist_dst);
3554     return (0);
3555
3556 nomem:
3557     free((void *)(uintptr_t)zc.zc_nvlist_dst);
3558     return (no_memory(zhp->zpool_hdl));
3559 }
3560
3561 /*
3562  * Upgrade a ZFS pool to the latest on-disk version.
3563  */
3564 int
3565 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3566 {
3567     zfs_cmd_t zc = { 0 };
3568     libzfs_handle_t *hdl = zhp->zpool_hdl;
3569
3570     (void) strcpy(zc.zc_name, zhp->zpool_name);
3571     zc.zc_cookie = new_version;
3572
3573     if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3574         return (zpool_standard_error_fmt(hdl, errno,
3575             dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3576             zhp->zpool_name));
3577     return (0);
3578 }
3579
3580 void
3581 zfs_save_arguments(int argc, char **argv, char *string, int len)
3582 {
3583     (void) strlcpy(string, basename(argv[0]), len);
3584     for (int i = 1; i < argc; i++) {
3585         (void) strlcat(string, " ", len);
3586         (void) strlcat(string, argv[i], len);
3587     }
3588 }
3589
3590 int
3591 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3592 {
3593     zfs_cmd_t zc = { 0 };
3594     nvlist_t *args;
3595     int err;
3596
3597     args = fnvlist_alloc();
3598     fnvlist_add_string(args, "message", message);
3599     err = zcmd_write_src_nvlist(hdl, &zc, args);
3600     if (err == 0)
3601         err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3602     nvlist_free(args);
3603     zcmd_free_nvlists(&zc);
3604     return (err);
3605 }
3606
3607 /*
3608  * Perform ioctl to get some command history of a pool.
3609  *
3610  * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3611  * logical offset of the history buffer to start reading from.
3612  *
3613  * Upon return, 'off' is the next logical offset to read from and
3614  * 'len' is the actual amount of bytes read into 'buf'.
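 *
 * A calling sketch (illustrative, not part of the original source):
 *
 *     char buf[128 * 1024];
 *     uint64_t off = 0, len = sizeof (buf);
 *     if (get_history(zhp, buf, &off, &len) == 0)
 *         (process 'len' bytes of packed records in 'buf')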
3615 */ 3616 static int 3617 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3618 { 3619 zfs_cmd_t zc = { 0 }; 3620 libzfs_handle_t *hdl = zhp->zpool_hdl; 3621 3622 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3623 3624 zc.zc_history = (uint64_t)(uintptr_t)buf; 3625 zc.zc_history_len = *len; 3626 zc.zc_history_offset = *off; 3627 3628 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3629 switch (errno) { 3630 case EPERM: 3631 return (zfs_error_fmt(hdl, EZFS_PERM, 3632 dgettext(TEXT_DOMAIN, 3633 "cannot show history for pool '%s'"), 3634 zhp->zpool_name)); 3635 case ENOENT: 3636 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3637 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3638 "'%s'"), zhp->zpool_name)); 3639 case ENOTSUP: 3640 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3641 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3642 "'%s', pool must be upgraded"), zhp->zpool_name)); 3643 default: 3644 return (zpool_standard_error_fmt(hdl, errno, 3645 dgettext(TEXT_DOMAIN, 3646 "cannot get history for '%s'"), zhp->zpool_name)); 3647 } 3648 } 3649 3650 *len = zc.zc_history_len; 3651 *off = zc.zc_history_offset; 3652 3653 return (0); 3654 } 3655 3656 /* 3657 * Process the buffer of nvlists, unpacking and storing each nvlist record 3658 * into 'records'. 'leftover' is set to the number of bytes that weren't 3659 * processed as there wasn't a complete record. 3660 */ 3661 int 3662 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3663 nvlist_t ***records, uint_t *numrecords) 3664 { 3665 uint64_t reclen; 3666 nvlist_t *nv; 3667 int i; 3668 3669 while (bytes_read > sizeof (reclen)) { 3670 3671 /* get length of packed record (stored as little endian) */ 3672 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3673 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3674 3675 if (bytes_read < sizeof (reclen) + reclen) 3676 break; 3677 3678 /* unpack record */ 3679 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3680 return (ENOMEM); 3681 bytes_read -= sizeof (reclen) + reclen; 3682 buf += sizeof (reclen) + reclen; 3683 3684 /* add record to nvlist array */ 3685 (*numrecords)++; 3686 if (ISP2(*numrecords + 1)) { 3687 *records = realloc(*records, 3688 *numrecords * 2 * sizeof (nvlist_t *)); 3689 } 3690 (*records)[*numrecords - 1] = nv; 3691 } 3692 3693 *leftover = bytes_read; 3694 return (0); 3695 } 3696 3697 /* 3698 * Retrieve the command history of a pool. 3699 */ 3700 int 3701 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3702 { 3703 char *buf; 3704 int buflen = 128 * 1024; 3705 uint64_t off = 0; 3706 nvlist_t **records = NULL; 3707 uint_t numrecords = 0; 3708 int err, i; 3709 3710 buf = malloc(buflen); 3711 if (buf == NULL) 3712 return (ENOMEM); 3713 do { 3714 uint64_t bytes_read = buflen; 3715 uint64_t leftover; 3716 3717 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3718 break; 3719 3720 /* if nothing else was read in, we're at EOF, just return */ 3721 if (!bytes_read) 3722 break; 3723 3724 if ((err = zpool_history_unpack(buf, bytes_read, 3725 &leftover, &records, &numrecords)) != 0) 3726 break; 3727 off -= leftover; 3728 if (leftover == bytes_read) { 3729 /* 3730 * no progress made, because buffer is not big enough 3731 * to hold this record; resize and retry. 
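             * (For example -- sizes illustrative -- with the
             * initial 128K buffer and a single packed record
             * larger than that, leftover == bytes_read, so
             * buflen doubles until the record fits.)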
3732 */ 3733 buflen *= 2; 3734 free(buf); 3735 buf = malloc(buflen); 3736 if (buf == NULL) 3737 return (ENOMEM); 3738 } 3739 3740 /* CONSTCOND */ 3741 } while (1); 3742 3743 free(buf); 3744 3745 if (!err) { 3746 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3747 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3748 records, numrecords) == 0); 3749 } 3750 for (i = 0; i < numrecords; i++) 3751 nvlist_free(records[i]); 3752 free(records); 3753 3754 return (err); 3755 } 3756 3757 void 3758 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3759 char *pathname, size_t len) 3760 { 3761 zfs_cmd_t zc = { 0 }; 3762 boolean_t mounted = B_FALSE; 3763 char *mntpnt = NULL; 3764 char dsname[ZFS_MAX_DATASET_NAME_LEN]; 3765 3766 if (dsobj == 0) { 3767 /* special case for the MOS */ 3768 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3769 return; 3770 } 3771 3772 /* get the dataset's name */ 3773 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3774 zc.zc_obj = dsobj; 3775 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3776 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3777 /* just write out a path of two object numbers */ 3778 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3779 dsobj, obj); 3780 return; 3781 } 3782 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3783 3784 /* find out if the dataset is mounted */ 3785 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3786 3787 /* get the corrupted object's path */ 3788 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3789 zc.zc_obj = obj; 3790 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3791 &zc) == 0) { 3792 if (mounted) { 3793 (void) snprintf(pathname, len, "%s%s", mntpnt, 3794 zc.zc_value); 3795 } else { 3796 (void) snprintf(pathname, len, "%s:%s", 3797 dsname, zc.zc_value); 3798 } 3799 } else { 3800 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3801 } 3802 free(mntpnt); 3803 } 3804 3805 /* 3806 * Read the EFI label from the config, if a label does not exist then 3807 * pass back the error to the caller. If the caller has passed a non-NULL 3808 * diskaddr argument then we set it to the starting address of the EFI 3809 * partition. 
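 *
 * A usage sketch (illustrative):
 *
 *     diskaddr_t sb;
 *     if (read_efi_label(config, &sb) >= 0)
 *         (sb now holds efi_parts[0].p_start for the disk)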
3810 */ 3811 static int 3812 read_efi_label(nvlist_t *config, diskaddr_t *sb) 3813 { 3814 char *path; 3815 int fd; 3816 char diskname[MAXPATHLEN]; 3817 int err = -1; 3818 3819 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3820 return (err); 3821 3822 (void) snprintf(diskname, sizeof (diskname), "%s%s", ZFS_RDISK_ROOT, 3823 strrchr(path, '/')); 3824 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3825 struct dk_gpt *vtoc; 3826 3827 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3828 if (sb != NULL) 3829 *sb = vtoc->efi_parts[0].p_start; 3830 efi_free(vtoc); 3831 } 3832 (void) close(fd); 3833 } 3834 return (err); 3835 } 3836 3837 /* 3838 * determine where a partition starts on a disk in the current 3839 * configuration 3840 */ 3841 static diskaddr_t 3842 find_start_block(nvlist_t *config) 3843 { 3844 nvlist_t **child; 3845 uint_t c, children; 3846 diskaddr_t sb = MAXOFFSET_T; 3847 uint64_t wholedisk; 3848 3849 if (nvlist_lookup_nvlist_array(config, 3850 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3851 if (nvlist_lookup_uint64(config, 3852 ZPOOL_CONFIG_WHOLE_DISK, 3853 &wholedisk) != 0 || !wholedisk) { 3854 return (MAXOFFSET_T); 3855 } 3856 if (read_efi_label(config, &sb) < 0) 3857 sb = MAXOFFSET_T; 3858 return (sb); 3859 } 3860 3861 for (c = 0; c < children; c++) { 3862 sb = find_start_block(child[c]); 3863 if (sb != MAXOFFSET_T) { 3864 return (sb); 3865 } 3866 } 3867 return (MAXOFFSET_T); 3868 } 3869 3870 /* 3871 * Label an individual disk. The name provided is the short name, 3872 * stripped of any leading /dev path. 3873 */ 3874 int 3875 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name) 3876 { 3877 char path[MAXPATHLEN]; 3878 struct dk_gpt *vtoc; 3879 int fd; 3880 size_t resv = EFI_MIN_RESV_SIZE; 3881 uint64_t slice_size; 3882 diskaddr_t start_block; 3883 char errbuf[1024]; 3884 3885 /* prepare an error message just in case */ 3886 (void) snprintf(errbuf, sizeof (errbuf), 3887 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3888 3889 if (zhp) { 3890 nvlist_t *nvroot; 3891 3892 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3893 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3894 3895 if (zhp->zpool_start_block == 0) 3896 start_block = find_start_block(nvroot); 3897 else 3898 start_block = zhp->zpool_start_block; 3899 zhp->zpool_start_block = start_block; 3900 } else { 3901 /* new pool */ 3902 start_block = NEW_START_BLOCK; 3903 } 3904 3905 (void) snprintf(path, sizeof (path), "%s/%s%s", ZFS_RDISK_ROOT, name, 3906 BACKUP_SLICE); 3907 3908 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3909 /* 3910 * This shouldn't happen. We've long since verified that this 3911 * is a valid device. 
3912  */
3913         zfs_error_aux(hdl,
3914             dgettext(TEXT_DOMAIN, "unable to open device"));
3915         return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3916     }
3917
3918     if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3919         /*
3920          * The only way this can fail is if we run out of memory, or we
3921          * were unable to read the disk's capacity.
3922          */
3923         if (errno == ENOMEM)
3924             (void) no_memory(hdl);
3925
3926         (void) close(fd);
3927         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3928             "unable to read disk capacity"));
3929
3930         return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3931     }
3932
3933     slice_size = vtoc->efi_last_u_lba + 1;
3934     slice_size -= EFI_MIN_RESV_SIZE;
3935     if (start_block == MAXOFFSET_T)
3936         start_block = NEW_START_BLOCK;
3937     slice_size -= start_block;
3938
3939     vtoc->efi_parts[0].p_start = start_block;
3940     vtoc->efi_parts[0].p_size = slice_size;
3941
3942     /*
3943      * Why we use V_USR: V_BACKUP confuses users, and is considered
3944      * disposable by some EFI utilities (since EFI doesn't have a backup
3945      * slice). V_UNASSIGNED is supposed to be used only for zero size
3946      * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
3947      * etc. were all pretty specific. V_USR is as close to reality as we
3948      * can get, in the absence of V_OTHER.
3949      */
3950     vtoc->efi_parts[0].p_tag = V_USR;
3951     (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3952
3953     vtoc->efi_parts[8].p_start = slice_size + start_block;
3954     vtoc->efi_parts[8].p_size = resv;
3955     vtoc->efi_parts[8].p_tag = V_RESERVED;
3956
3957     if (efi_write(fd, vtoc) != 0) {
3958         /*
3959          * Some block drivers (like pcata) may not support EFI
3960          * GPT labels. Print out a helpful error message directing
3961          * the user to manually label the disk and give a specific
3962          * slice.
3963          */
3964         (void) close(fd);
3965         efi_free(vtoc);
3966
3967         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3968             "try using fdisk(1M) and then provide a specific slice"));
3969         return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3970     }
3971
3972     (void) close(fd);
3973     efi_free(vtoc);
3974     return (0);
3975 }
3976
3977 static boolean_t
3978 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3979 {
3980     char *type;
3981     nvlist_t **child;
3982     uint_t children, c;
3983
3984     verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3985     if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
3986         strcmp(type, VDEV_TYPE_HOLE) == 0 ||
3987         strcmp(type, VDEV_TYPE_MISSING) == 0) {
3988         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3989             "vdev type '%s' is not supported"), type);
3990         (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3991         return (B_FALSE);
3992     }
3993     if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3994         &child, &children) == 0) {
3995         for (c = 0; c < children; c++) {
3996             if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3997                 return (B_FALSE);
3998         }
3999     }
4000     return (B_TRUE);
4001 }
4002
4003 /*
4004  * Check if this zvol is allowable for use as a dump device; zero if
4005  * it is, > 0 if it isn't, < 0 if it isn't a zvol.
4006  *
4007  * Allowable storage configurations include mirrors, all raidz variants, and
4008  * pools with log, cache, and spare devices. Pools which are backed by files or
4009  * have missing/hole vdevs are not suitable.
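 *
 * (Usage sketch with a hypothetical zvol:
 * zvol_check_dump_config("/dev/zvol/dsk/rpool/dump") == 0 means the
 * pool layout can back a dump device.)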
4010 */ 4011 int 4012 zvol_check_dump_config(char *arg) 4013 { 4014 zpool_handle_t *zhp = NULL; 4015 nvlist_t *config, *nvroot; 4016 char *p, *volname; 4017 nvlist_t **top; 4018 uint_t toplevels; 4019 libzfs_handle_t *hdl; 4020 char errbuf[1024]; 4021 char poolname[ZFS_MAX_DATASET_NAME_LEN]; 4022 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4023 int ret = 1; 4024 4025 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4026 return (-1); 4027 } 4028 4029 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4030 "dump is not supported on device '%s'"), arg); 4031 4032 if ((hdl = libzfs_init()) == NULL) 4033 return (1); 4034 libzfs_print_on_error(hdl, B_TRUE); 4035 4036 volname = arg + pathlen; 4037 4038 /* check the configuration of the pool */ 4039 if ((p = strchr(volname, '/')) == NULL) { 4040 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4041 "malformed dataset name")); 4042 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4043 return (1); 4044 } else if (p - volname >= ZFS_MAX_DATASET_NAME_LEN) { 4045 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4046 "dataset name is too long")); 4047 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4048 return (1); 4049 } else { 4050 (void) strncpy(poolname, volname, p - volname); 4051 poolname[p - volname] = '\0'; 4052 } 4053 4054 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4055 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4056 "could not open pool '%s'"), poolname); 4057 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4058 goto out; 4059 } 4060 config = zpool_get_config(zhp, NULL); 4061 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4062 &nvroot) != 0) { 4063 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4064 "could not obtain vdev configuration for '%s'"), poolname); 4065 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4066 goto out; 4067 } 4068 4069 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4070 &top, &toplevels) == 0); 4071 4072 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4073 goto out; 4074 } 4075 ret = 0; 4076 4077 out: 4078 if (zhp) 4079 zpool_close(zhp); 4080 libzfs_fini(hdl); 4081 return (ret); 4082 } 4083