/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <alloca.h>
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <dirent.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio.h>
#include <strings.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

/*
 * ====================================================================
 *	zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
		return (zpool_prop_default_numeric(prop));

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA)
			return (gettext("FAULTED"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		if (prop == ZPOOL_PROP_NAME)
			(void) strlcpy(buf, zpool_get_name(zhp), len);
		else if (prop == ZPOOL_PROP_HEALTH)
			(void) strlcpy(buf, "FAULTED", len);
		else
			(void) strlcpy(buf, "-", len);
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_USED:
		case ZPOOL_PROP_AVAILABLE:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
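/*
 * Illustrative usage sketch (not part of the original source): how a
 * consumer typically reads a property with zpool_get_prop().  The function
 * name below is hypothetical; 'zhp' is assumed to have been obtained from
 * zpool_open() elsewhere, and <stdio.h> is already included above.
 */
static void
example_print_pool_health(zpool_handle_t *zhp)
{
	char buf[ZFS_MAXPROPLEN];
	zprop_source_t src;

	/* The value is rendered into the caller-supplied buffer. */
	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
	    &src) == 0)
		(void) printf("%s health: %s\n", zpool_get_name(zhp), buf);
}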
/*
 * Check that the bootfs name has the same pool name as the pool on which it
 * is being set.  'bootfs' is assumed to be a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value must be a dataset name,
			 * and the dataset must reside in the same pool on
			 * which the property is set.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}
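/*
 * Illustrative usage sketch (not part of the original source): setting a
 * property from a name/value string pair, as 'zpool set' does.  The function
 * name is hypothetical, and it assumes the "autoreplace" property is
 * supported by the pool's on-disk version.
 */
static int
example_enable_autoreplace(zpool_handle_t *zhp)
{
	/*
	 * zpool_set_prop() validates the pair via zpool_valid_proplist(),
	 * issues the ioctl(), and refreshes the cached property list.
	 */
	return (zpool_set_prop(zhp, "autoreplace", "on"));
}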
/*
 * Validate the given pool name, optionally reporting extended error
 * information via 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;

			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}
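/*
 * Illustrative usage sketch (not part of the original source): the contract
 * of zpool_open_silent() is that a missing pool is not an error -- it
 * returns 0 with *zhp set to NULL.  The function name is hypothetical.
 */
static int
example_open_quietly(libzfs_handle_t *hdl, const char *name)
{
	zpool_handle_t *zhp;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp != NULL) {
		(void) printf("%s state: %d\n", zpool_get_name(zhp),
		    zpool_get_state(zhp));
		zpool_close(zhp);
	}
	return (0);
}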
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	char *altroot;
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));
		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
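/*
 * Illustrative sketch (not part of the original source): a minimal vdev
 * nvlist for zpool_create() -- a root vdev with a single disk child.  Real
 * callers (zpool(1M)) build this via make_root_vdev() and label the disk
 * first; the device path here is hypothetical and the function name is an
 * assumption for illustration only.
 */
static int
example_create_single_disk_pool(libzfs_handle_t *hdl, const char *pool)
{
	nvlist_t *nvroot, *disk;
	int ret = -1;

	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
		return (-1);
	if (nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(nvroot);
		return (-1);
	}

	if (nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) == 0 &&
	    nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
	    "/dev/dsk/c1t0d0s0") == 0 &&
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0 &&
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0)
		ret = zpool_create(hdl, pool, nvroot, NULL, NULL);

	nvlist_free(disk);
	nvlist_free(nvroot);
	return (ret);
}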
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool cannot have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0)
		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
		    zhp->zpool_name));
	return (0);
}

/*
 * zpool_import() is a contracted interface.  Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
	if (props)
		nvlist_free(props);
	return (ret);
}
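/*
 * Illustrative usage sketch (not part of the original source): importing a
 * pool under an alternate root.  'config' is assumed to have come from
 * zpool_find_import(); passing NULL for newname keeps the name recorded in
 * the config.  The function name and "/mnt" altroot are hypothetical.
 */
static int
example_import_with_altroot(libzfs_handle_t *hdl, nvlist_t *config)
{
	char altroot[] = "/mnt";

	return (zpool_import(hdl, config, NULL, altroot));
}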
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, boolean_t importfaulted)
{
	zfs_cmd_t zc = { 0 };
	char *thename;
	char *origname;
	int ret;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, B_TRUE, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = (uint64_t)importfaulted;
	ret = 0;
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];
		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, desc);
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
			ret = -1;
		} else if (zhp != NULL) {
			ret = zpool_create_zvol_links(zhp);
			zpool_close(zhp);
		}

	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}

/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

	if (errno == EBUSY)
		return (zfs_error(hdl, EZFS_RESILVERING, msg));
	else
		return (zpool_standard_error(hdl, errno, msg));
}

/*
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
    boolean_t *avail_spare, boolean_t *l2cache)
{
	uint_t c, children;
	nvlist_t **child;
	uint64_t theguid, present;
	char *path;
	uint64_t wholedisk = 0;
	nvlist_t *ret;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);

	if (search == NULL &&
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
		/*
		 * If the device has never been present since import, the only
		 * reliable way to match the vdev is by GUID.
		 */
		if (theguid == guid)
			return (nv);
	} else if (search != NULL &&
	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (wholedisk) {
			/*
			 * For whole disks, the internal path has 's0', but the
			 * path passed in by the user doesn't.
			 */
			if (strlen(search) == strlen(path) - 2 &&
			    strncmp(search, path, strlen(search)) == 0)
				return (nv);
		} else if (strcmp(search, path) == 0) {
			return (nv);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++)
		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
		    avail_spare, l2cache)) != NULL)
			return (ret);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare, l2cache)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare, l2cache)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache)
{
	char buf[MAXPATHLEN];
	const char *search;
	char *end;
	nvlist_t *nvroot;
	uint64_t guid;

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		search = NULL;
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		search = buf;
	} else {
		search = path;
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
	    l2cache));
}

/*
 * Returns TRUE if the given guid corresponds to the given type.
 * This is used to check for hot spares (INUSE or not), and level 2 cache
 * devices.
 */
static boolean_t
is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
{
	uint64_t target_guid;
	nvlist_t *nvroot;
	nvlist_t **list;
	uint_t count;
	int i;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
			    &target_guid) == 0);
			if (guid == target_guid)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}
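/*
 * Illustrative usage sketch (not part of the original source): looking up a
 * vdev by name with zpool_find_vdev().  Short names such as "c1t0d0" are
 * resolved under /dev/dsk/, and pure numbers are treated as GUIDs.  The
 * function name is hypothetical.
 */
static boolean_t
example_vdev_is_cache(zpool_handle_t *zhp, const char *path)
{
	boolean_t spare, l2cache;

	if (zpool_find_vdev(zhp, path, &spare, &l2cache) == NULL)
		return (B_FALSE);
	return (l2cache);
}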
/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare ||
	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
		return (zpool_standard_error(hdl, errno, msg));

	*newstate = zc.zc_cookie;
	return (0);
}

/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare ||
	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}

}
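/*
 * Illustrative usage sketch (not part of the original source): onlining a
 * device and reporting the state it came back in.  Passing 0 for flags means
 * no ZFS_ONLINE_* modifiers; the function name is hypothetical.
 */
static int
example_online_device(zpool_handle_t *zhp, const char *path)
{
	vdev_state_t newstate;

	if (zpool_vdev_online(zhp, path, 0, &newstate) != 0)
		return (-1);

	(void) printf("%s is now %s\n", path,
	    zpool_state_to_name(newstate, VDEV_AUX_NONE));
	return (0);
}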
/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	uint64_t val, is_log;
	char *path, *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache) == NULL ||
	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
	 * already spared device.
	 */
	if (replacing &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    zpool_find_vdev(zhp, newname, &avail_spare, &l2cache) != NULL &&
	    avail_spare && is_replacing_spare(config_root, tgt, 0)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device has already been replaced with a spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			is_log = B_FALSE;
			(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_LOG,
			    &is_log);
			if (is_log)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
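/*
 * Illustrative sketch (not part of the original source): attaching a new
 * mirror side.  zpool_vdev_attach() expects an nvroot shaped like the one
 * zpool_create() takes, with exactly one disk child.  The function name is
 * hypothetical, and real callers label the new disk first.
 */
static int
example_attach_new_mirror_side(zpool_handle_t *zhp, const char *old_disk,
    const char *new_disk)
{
	nvlist_t *nvroot, *disk;
	int ret = -1;

	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
		return (-1);
	if (nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(nvroot);
		return (-1);
	}

	if (nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) == 0 &&
	    nvlist_add_string(disk, ZPOOL_CONFIG_PATH, new_disk) == 0 &&
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0 &&
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0)
		ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, 0);

	nvlist_free(disk);
	nvlist_free(nvroot);
	return (ret);
}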
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (!avail_spare && !l2cache) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares or cache devices "
		    "can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
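/*
 * Illustrative usage sketch (not part of the original source): choosing
 * between detach and remove.  Inactive spares and cache devices are removed;
 * mirror or replacing children are detached.  The function name is
 * hypothetical.
 */
static int
example_detach_or_remove(zpool_handle_t *zhp, const char *path)
{
	boolean_t spare, l2cache;

	if (zpool_find_vdev(zhp, path, &spare, &l2cache) == NULL)
		return (-1);

	if (spare || l2cache)
		return (zpool_vdev_remove(zhp, path));
	return (zpool_vdev_detach(zhp, path));
}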
/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
 * hierarchy.
 */
int
zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
    void *data)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char (*paths)[MAXPATHLEN];
	size_t size = 4;
	int curr, fd, base, ret = 0;
	DIR *dirp;
	struct dirent *dp;
	struct stat st;

	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
		return (errno == ENOENT ? 0 : -1);

	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
		int err = errno;
		(void) close(base);
		return (err == ENOENT ? 0 : -1);
	}

	/*
	 * Oddly this wasn't a directory -- ignore that failure since we
	 * know there are no links lower in the (non-existent) hierarchy.
	 */
	if (!S_ISDIR(st.st_mode)) {
		(void) close(base);
		return (0);
	}

	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
		(void) close(base);
		return (-1);
	}

	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
	curr = 0;

	while (curr >= 0) {
		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
			goto err;

		if (S_ISDIR(st.st_mode)) {
			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
				goto err;

			if ((dirp = fdopendir(fd)) == NULL) {
				(void) close(fd);
				goto err;
			}

			while ((dp = readdir(dirp)) != NULL) {
				if (dp->d_name[0] == '.')
					continue;

				if (curr + 1 == size) {
					paths = zfs_realloc(hdl, paths,
					    size * sizeof (paths[0]),
					    size * 2 * sizeof (paths[0]));
					if (paths == NULL) {
						(void) closedir(dirp);
						(void) close(fd);
						goto err;
					}

					size *= 2;
				}

				(void) strlcpy(paths[curr + 1], paths[curr],
				    sizeof (paths[curr + 1]));
				(void) strlcat(paths[curr], "/",
				    sizeof (paths[curr]));
				(void) strlcat(paths[curr], dp->d_name,
				    sizeof (paths[curr]));
				curr++;
			}

			(void) closedir(dirp);

		} else {
			if ((ret = cb(paths[curr], data)) != 0)
				break;
		}

		curr--;
	}

	free(paths);
	(void) close(base);

	return (ret);

err:
	free(paths);
	(void) close(base);
	return (-1);
}
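/*
 * Illustrative usage sketch (not part of the original source): a callback
 * for zpool_iter_zvol() that counts the zvol device links in a pool.
 * Returning nonzero from the callback stops the walk.  Both function names
 * are hypothetical.
 */
static int
example_count_cb(const char *dataset, void *data)
{
	(*(int *)data)++;
	return (0);
}

static int
example_count_zvols(zpool_handle_t *zhp, int *countp)
{
	*countp = 0;
	return (zpool_iter_zvol(zhp, example_count_cb, countp));
}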
typedef struct zvol_cb {
	zpool_handle_t *zcb_pool;
	boolean_t zcb_create;
} zvol_cb_t;

/*ARGSUSED*/
static int
do_zvol_create(zfs_handle_t *zhp, void *data)
{
	int ret = 0;

	if (ZFS_IS_VOLUME(zhp)) {
		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
	}

	if (ret == 0)
		ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);

	zfs_close(zhp);

	return (ret);
}

/*
 * Iterate over all zvols in the pool and make any necessary minor nodes.
 */
int
zpool_create_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
	    zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);

	zfs_close(zfp);
	return (ret);
}

static int
do_zvol_remove(const char *dataset, void *data)
{
	zpool_handle_t *zhp = data;

	return (zvol_remove_link(zhp->zpool_hdl, dataset));
}

/*
 * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
 * by examining the /dev links so that a corrupted pool doesn't impede this
 * operation.
 */
int
zpool_remove_zvol_links(zpool_handle_t *zhp)
{
	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	if ((path = strdup(list[0].devname)) == NULL)
		return (NULL);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}
/*
 * Given a vdev, return the name to display in iostat. If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location. The kernel will have opened the device by
 * devid, but the path will still refer to the old location. To catch this, we
 * first do a path -> devid translation (which is fast for the common case). If
 * the devid matches, we're done. If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the
 * vdev. If 'zhp' is NULL, then this is an exported pool, and we don't need to
 * do any of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it. Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			char *tmp = zfs_strdup(hdl, path);
			if (tmp == NULL)
				return (NULL);
			tmp[strlen(path) - 2] = '\0';
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}
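/*
 * Illustrative sketch (not part of the library): a status-style consumer
 * might recurse over ZPOOL_CONFIG_VDEV_TREE and print the display name of
 * every vdev. The function name is hypothetical.
 */
#if 0
static void
print_vdev_tree(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *name;

	if ((name = zpool_vdev_name(hdl, zhp, nv)) != NULL) {
		(void) printf("%*s%s\n", indent, "", name);
		free(name);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0)
		for (c = 0; c < children; c++)
			print_vdev_tree(hdl, zhp, child[c], indent + 2);
}
#endif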
static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel. If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks. This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process. So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
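/*
 * Illustrative sketch (not part of the library): each element of the list
 * returned by zpool_get_errlog() carries a dataset and object number, which
 * a consumer can resolve to a pathname with zpool_obj_to_path(), defined
 * later in this file. The variable names are hypothetical.
 */
#if 0
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;

	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
			nvlist_t *nv;
			uint64_t dsobj, obj;
			char pathname[MAXPATHLEN * 2];

			verify(nvpair_value_nvlist(elem, &nv) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
			    &dsobj) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
			    &obj) == 0);

			zpool_obj_to_path(zhp, dsobj, obj, pathname,
			    sizeof (pathname));
			(void) printf("%s\n", pathname);
		}
		nvlist_free(nverrlist);
	}
#endif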
/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

void
zpool_set_history_str(const char *subcommand, int argc, char **argv,
    char *history_str)
{
	int i;

	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
	for (i = 1; i < argc; i++) {
		if (strlen(history_str) + 1 + strlen(argv[i]) >
		    HIS_MAX_RECORD_LEN)
			break;
		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
	}
}

/*
 * Stage command history for logging.
 */
int
zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
{
	if (history_str == NULL)
		return (EINVAL);

	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
		return (EINVAL);

	if (hdl->libzfs_log_str != NULL)
		free(hdl->libzfs_log_str);

	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
		return (no_memory(hdl));

	return (0);
}

/*
 * Perform an ioctl to read some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual number of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}
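/*
 * Illustrative sketch (not part of the library): for an invocation such as
 * "zpool create tank mirror c0d0 c1d0", the two helpers above combine to
 * stage the full command line for the kernel to append to the pool's
 * history log. argv[0] is skipped, since the first word is supplied as the
 * 'subcommand' argument.
 */
#if 0
	char history_str[HIS_MAX_RECORD_LEN];

	zpool_set_history_str("zpool", argc, argv, history_str);
	(void) zpool_stage_history(hdl, history_str);
#endif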
/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'. 'leftover' is set to the number of bytes that weren't
 * processed, since they didn't form a complete record.
 */
static int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8 * i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array, growing it by powers of two */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			nvlist_t **tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
			if (tmp == NULL) {
				nvlist_free(nv);
				return (ENOMEM);
			}
			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}

#define	HIS_BUF_LEN	(128 * 1024)

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (u_longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (u_longlong_t)dsobj, (u_longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (u_longlong_t)obj);
	}
	free(mntpnt);
}
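/*
 * Illustrative sketch (not part of the library): zpool_get_history() hands
 * back an nvlist whose ZPOOL_HIST_RECORD array holds one nvlist per log
 * entry; 'zpool history' prints them roughly like this. ZPOOL_HIST_TIME and
 * ZPOOL_HIST_CMD name the timestamp and command-line members of a record.
 */
#if 0
	nvlist_t *nvhis;
	nvlist_t **records;
	uint_t numrecords, i;

	if (zpool_get_history(zhp, &nvhis) == 0) {
		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
		    &records, &numrecords) == 0);
		for (i = 0; i < numrecords; i++) {
			uint64_t tsec;
			char *cmd;

			if (nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME,
			    &tsec) == 0 &&
			    nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
			    &cmd) == 0)
				(void) printf("%llu %s\n",
				    (u_longlong_t)tsec, cmd);
		}
		nvlist_free(nvhis);
	}
#endif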
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128K, so start there instead
 * (256 sectors of 512 bytes each is exactly 128K).
 */
#define	NEW_START_BLOCK	256

/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller. If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	char *slash;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	if ((slash = strrchr(path, '/')) == NULL)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
	    slash);
	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * Determine where a partition starts on a disk in the current
 * configuration.
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}
/*
 * Label an individual disk. The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
	    BACKUP_SLICE);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		/*
		 * This shouldn't happen. We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity.
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice). V_UNASSIGNED is supposed to be used only for zero-size
	 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
	 * etc. are all pretty specific. V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI GPT
		 * labels. Print out a helpful error message directing the
		 * user to manually label the disk and give a specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
	return (0);
}
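/*
 * Illustrative worked example (not part of the library): for a disk whose
 * last usable LBA is 1048575 (a 512MB device with 512-byte sectors), the
 * layout computed above with the default start block is a V_USR "zfs" slice
 * starting at block 256, followed by the V_RESERVED slice at the end of the
 * disk. A minimal sketch of the same arithmetic, assuming EFI_MIN_RESV_SIZE
 * is 16384 sectors as on this platform:
 */
#if 0
	uint64_t last_usable_lba = 1048575;		/* example value */
	uint64_t start = NEW_START_BLOCK;		/* 256 = 128K / 512 */
	uint64_t size = (last_usable_lba + 1) - EFI_MIN_RESV_SIZE - start;

	/* slice 0 plus the reserved slice exactly cover the usable blocks */
	assert(start + size + EFI_MIN_RESV_SIZE == last_usable_lba + 1);
#endif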
/*
 * Recursively verify that the given vdev configuration contains no vdev
 * types that cannot back a dump device (raidz, file, log, or missing
 * devices).
 */
static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	char *type;
	nvlist_t **child;
	uint_t children, c;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "vdev type '%s' is not supported"), type);
		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
		return (B_FALSE);
	}
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}
	return (B_TRUE);
}
/*
 * Check whether the given zvol is allowable for use as a dump device.
 * Returns 0 if it is, > 0 if it isn't, and < 0 if it isn't a zvol.
 */
int
zvol_check_dump_config(char *arg)
{
	zpool_handle_t *zhp = NULL;
	nvlist_t *config, *nvroot;
	char *p, *volname;
	nvlist_t **top;
	uint_t toplevels;
	libzfs_handle_t *hdl;
	char errbuf[1024];
	char poolname[ZPOOL_MAXNAMELEN];
	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
	int ret = 1;

	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
		return (-1);
	}

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "dump is not supported on device '%s'"), arg);

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	libzfs_print_on_error(hdl, B_TRUE);

	volname = arg + pathlen;

	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		goto out;
	} else if (p - volname >= ZFS_MAXNAMELEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;
	} else {
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}

	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not open pool '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
		goto out;
	}
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);
	if (toplevels != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "'%s' has multiple top level vdevs"), poolname);
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
		goto out;
	}

	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
		goto out;
	}
	ret = 0;

out:
	if (zhp)
		zpool_close(zhp);
	libzfs_fini(hdl);
	return (ret);
}
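/*
 * Illustrative sketch (not part of the library): a dumpadm-style consumer
 * would call zvol_check_dump_config() with the candidate device path before
 * configuring it as a dump device. The zvol name is hypothetical.
 */
#if 0
	if (zvol_check_dump_config("/dev/zvol/dsk/tank/dumpvol") == 0)
		(void) printf("device is usable for dump\n");
#endif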