/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <alloca.h>
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <dirent.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#if defined(__i386) || defined(__amd64)
#define	BOOTCMD	"installgrub(1M)"
#else
#define	BOOTCMD	"installboot(1M)"
#endif

/*
 * ====================================================================
 *   zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}
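/*
 * Illustrative sketch (not part of the original file): the mapping above
 * is what consumers such as zpool(1M) rely on when printing status, e.g.
 *
 *	zpool_state_to_name(VDEV_STATE_HEALTHY, VDEV_AUX_NONE)
 *	    returns "ONLINE", while
 *	zpool_state_to_name(VDEV_STATE_CANT_OPEN, VDEV_AUX_CORRUPT_DATA)
 *	    returns "FAULTED".
 */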
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				if (srctype != NULL)
					*srctype = src;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_USED:
		case ZPOOL_PROP_AVAILABLE:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		default:
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
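/*
 * Illustrative sketch (not part of the original file): a typical consumer
 * reads a property into a caller-supplied buffer, e.g.
 *
 *	char buf[ZPOOL_MAXNAMELEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf,
 *	    sizeof (buf), &src) == 0)
 *		(void) printf("health: %s\n", buf);
 *
 * 'zhp' is assumed to come from zpool_open() or zpool_open_canfail().
 */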
/*
 * Check that the bootfs name carries the pool name it is being set on,
 * i.e. that the boot dataset resides in that pool.  Assumes 'bootfs' is
 * a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
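/*
 * For example (sketch, not in the original file): for a pool named "tank",
 * bootfs_name_valid() accepts "tank" and "tank/ROOT/be", but rejects
 * "tank2/ROOT/be", since only the pool name itself or a dataset under it
 * may be used as the boot filesystem.
 */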
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}

static boolean_t
pool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value must be a dataset name,
			 * and the dataset must reside in the pool on which
			 * the property is set.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
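/*
 * Illustrative sketch (not part of the original file): setting a pool
 * property from an application, assuming a handle obtained via
 * libzfs_init() and zpool_open():
 *
 *	libzfs_handle_t *g_zfs = libzfs_init();
 *	zpool_handle_t *zhp = zpool_open(g_zfs, "tank");
 *
 *	if (zhp != NULL) {
 *		(void) zpool_set_prop(zhp, "autoreplace", "on");
 *		zpool_close(zhp);
 *	}
 *	libzfs_fini(g_zfs);
 */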
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Validate the given pool name, optionally recording an extended error
 * message via 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
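/*
 * For example (sketch, not in the original file): at create/import time,
 * zpool_name_valid(hdl, B_FALSE, "mirror") and
 * zpool_name_valid(hdl, B_FALSE, "raidz1") both fail with "name is
 * reserved", while zpool_name_valid(hdl, B_TRUE, "mirror") succeeds so
 * that an existing pool with such a name can still be opened.
 */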
/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
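/*
 * Illustrative sketch (not part of the original file): the typical
 * open/use/close lifecycle for the handle functions above:
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(g_zfs, "tank")) == NULL)
 *		return (1);
 *	(void) printf("pool %s, state %d\n", zpool_get_name(zhp),
 *	    zpool_get_state(zhp));
 *	zpool_close(zhp);
 *
 * 'g_zfs' is assumed to be a libzfs_handle_t from libzfs_init().
 */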
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	char *altroot;
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
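/*
 * Illustrative sketch (not part of the original file): creating and then
 * destroying a pool, assuming 'nvroot' was built by the caller (as
 * zpool(1M) does via make_root_vdev()):
 *
 *	if (zpool_create(g_zfs, "tank", nvroot, NULL, NULL) != 0)
 *		return (1);
 *	if ((zhp = zpool_open(g_zfs, "tank")) != NULL) {
 *		(void) zpool_destroy(zhp);
 *		zpool_close(zhp);
 *	}
 */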
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s]));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
	return (zpool_export_common(zhp, force, B_FALSE));
}

int
zpool_export_force(zpool_handle_t *zhp)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
}

/*
 * zpool_import() is a contracted interface.  Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
	if (props)
		nvlist_free(props);
	return (ret);
}
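/*
 * Illustrative sketch (not part of the original file): exporting a pool
 * and re-importing it under a new name with an alternate root:
 *
 *	if (zpool_export(zhp, B_FALSE) == 0) {
 *		... locate 'config' via zpool_find_import() ...
 *		(void) zpool_import(g_zfs, config, "newtank", "/mnt");
 *	}
 */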
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, boolean_t importfaulted)
{
	zfs_cmd_t zc = { 0 };
	char *thename;
	char *origname;
	int ret;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, B_TRUE, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = (uint64_t)importfaulted;
	ret = 0;
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];
		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, desc);
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
			ret = -1;
		} else if (zhp != NULL) {
			ret = zpool_create_zvol_links(zhp);
			zpool_close(zhp);
		}
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}

/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

	if (errno == EBUSY)
		return (zfs_error(hdl, EZFS_RESILVERING, msg));
	else
		return (zpool_standard_error(hdl, errno, msg));
}
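/*
 * Illustrative sketch (not part of the original file): starting a scrub
 * with the function above:
 *
 *	(void) zpool_scrub(zhp, POOL_SCRUB_EVERYTHING);
 *
 * and POOL_SCRUB_NONE stops an in-progress scrub.
 */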
/*
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare, but FALSE if it is an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	uint64_t theguid, present;
	char *path;
	uint64_t wholedisk = 0;
	nvlist_t *ret;
	uint64_t is_log;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);

	if (search == NULL &&
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
		/*
		 * If the device has never been present since import, the only
		 * reliable way to match the vdev is by GUID.
		 */
		if (theguid == guid)
			return (nv);
	} else if (search != NULL &&
	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (wholedisk) {
			/*
			 * For whole disks, the internal path has 's0', but the
			 * path passed in by the user doesn't.
			 */
			if (strlen(search) == strlen(path) - 2 &&
			    strncmp(search, path, strlen(search)) == 0)
				return (nv);
		} else if (strcmp(search, path) == 0) {
			return (nv);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char buf[MAXPATHLEN];
	const char *search;
	char *end;
	nvlist_t *nvroot;
	uint64_t guid;

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		search = NULL;
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		search = buf;
	} else {
		search = path;
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
	    l2cache, log));
}

static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}

/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}

static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}
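/*
 * Illustrative sketch (not part of the original file): looking up a leaf
 * vdev by its user-visible name:
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *
 *	tgt = zpool_find_vdev(zhp, "c1t0d0", &spare, &l2cache, &log);
 *
 * The name may also be an absolute path or a vdev guid in decimal; a bare
 * device name has "/dev/dsk/" prepended before the search.
 */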
/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * root pool can not have EFI labeled disks and can only have
	 * a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}

/*
 * Get phys_path for a root pool
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
	    phypath_size));
}

/*
 * Returns TRUE if the given guid corresponds to the given type.
 * This is used to check for hot spares (INUSE or not), and level 2 cache
 * devices.
 */
static boolean_t
is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
{
	uint64_t target_guid;
	nvlist_t *nvroot;
	nvlist_t **list;
	uint_t count;
	int i;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
			    &target_guid) == 0);
			if (guid == target_guid)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare ||
	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
		return (zpool_standard_error(hdl, errno, msg));

	*newstate = zc.zc_cookie;
	return (0);
}
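/*
 * Illustrative sketch (not part of the original file): bringing a device
 * back online and checking the resulting state:
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "c1t0d0", 0, &newstate) == 0 &&
 *	    newstate == VDEV_STATE_HEALTHY)
 *		(void) printf("device is ONLINE\n");
 */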
/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare ||
	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *path, *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = pool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
	 * already spared device.
	 */
	if (replacing &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    zpool_find_vdev(zhp, newname, &avail_spare,
	    &l2cache, NULL) != NULL && avail_spare &&
	    is_replacing_spare(config_root, tgt, 0)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device has already been replaced with a spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX - This should be removed once we can
			 * automatically install the bootblocks on the
			 * newly attached disk.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
			    "be sure to invoke %s to make '%s' bootable.\n"),
			    BOOTCMD, new_disk);

			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
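/*
 * Illustrative sketch (not part of the original file): replacing a failed
 * disk, assuming 'nvroot' describes the single replacement disk (as built
 * by zpool(1M) for 'zpool replace'):
 *
 *	if (zpool_vdev_attach(zhp, "c1t0d0", "c2t0d0", nvroot, 1) == 0)
 *		(void) printf("resilver started\n");
 *
 * Passing 0 for 'replacing' instead attaches the new disk as a mirror of
 * the old one.
 */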
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (!avail_spare && !l2cache) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares or cache devices "
		    "can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
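/*
 * Illustrative sketch (not part of the original file): clearing error
 * counts after a transient failure:
 *
 *	(void) zpool_clear(zhp, NULL);		(whole pool)
 *	(void) zpool_clear(zhp, "c1t0d0");	(one device)
 */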

/*
 * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
 * hierarchy.
 */
int
zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
    void *data)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char (*paths)[MAXPATHLEN];
	size_t size = 4;
	int curr, fd, base, ret = 0;
	DIR *dirp;
	struct dirent *dp;
	struct stat st;

	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
		return (errno == ENOENT ? 0 : -1);

	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
		int err = errno;
		(void) close(base);
		return (err == ENOENT ? 0 : -1);
	}

	/*
	 * Oddly this wasn't a directory -- ignore that failure since we
	 * know there are no links lower in the (non-existent) hierarchy.
	 */
	if (!S_ISDIR(st.st_mode)) {
		(void) close(base);
		return (0);
	}

	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
		(void) close(base);
		return (-1);
	}

	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
	curr = 0;

	/*
	 * Depth-first traversal: directory entries are pushed onto the
	 * 'paths' stack, and the callback is invoked for every leaf.
	 */
	while (curr >= 0) {
		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
			goto err;

		if (S_ISDIR(st.st_mode)) {
			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
				goto err;

			if ((dirp = fdopendir(fd)) == NULL) {
				(void) close(fd);
				goto err;
			}

			while ((dp = readdir(dirp)) != NULL) {
				if (dp->d_name[0] == '.')
					continue;

				if (curr + 1 == size) {
					paths = zfs_realloc(hdl, paths,
					    size * sizeof (paths[0]),
					    size * 2 * sizeof (paths[0]));
					if (paths == NULL) {
						(void) closedir(dirp);
						(void) close(fd);
						goto err;
					}

					size *= 2;
				}

				(void) strlcpy(paths[curr + 1], paths[curr],
				    sizeof (paths[curr + 1]));
				(void) strlcat(paths[curr], "/",
				    sizeof (paths[curr]));
				(void) strlcat(paths[curr], dp->d_name,
				    sizeof (paths[curr]));
				curr++;
			}

			(void) closedir(dirp);

		} else {
			if ((ret = cb(paths[curr], data)) != 0)
				break;
		}

		curr--;
	}

	free(paths);
	(void) close(base);

	return (ret);

err:
	free(paths);
	(void) close(base);
	return (-1);
}

typedef struct zvol_cb {
	zpool_handle_t *zcb_pool;
	boolean_t zcb_create;
} zvol_cb_t;

/*ARGSUSED*/
static int
do_zvol_create(zfs_handle_t *zhp, void *data)
{
	int ret = 0;

	if (ZFS_IS_VOLUME(zhp)) {
		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
	}

	if (ret == 0)
		ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);

	zfs_close(zhp);

	return (ret);
}

/*
 * Iterate over all zvols in the pool and make any necessary minor nodes.
 */
int
zpool_create_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
	    zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);

	zfs_close(zfp);
	return (ret);
}

static int
do_zvol_remove(const char *dataset, void *data)
{
	zpool_handle_t *zhp = data;

	return (zvol_remove_link(zhp->zpool_hdl, dataset));
}

/*
 * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
 * by examining the /dev links so that a corrupted pool doesn't impede this
 * operation.
 */
int
zpool_remove_zvol_links(zpool_handle_t *zhp)
{
	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
}
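
/*
 * Example callback for zpool_iter_zvol() (an illustrative sketch only):
 * print the name of every zvol found under the pool's /dev/zvol/dsk tree.
 *
 *	static int
 *	print_zvol(const char *dataset, void *data)
 *	{
 *		(void) printf("%s\n", dataset);
 *		return (0);
 *	}
 *
 *	(void) zpool_iter_zvol(zhp, print_zvol, NULL);
 *
 * Returning nonzero from the callback stops the walk, and that value is
 * passed back to the caller of zpool_iter_zvol().
 */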

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	/*
	 * Free the name list before checking the result of strdup() so that
	 * it isn't leaked if the allocation fails.
	 */
	path = strdup(list[0].devname);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}
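
/*
 * Example of the devid round trip performed by the two helpers above (an
 * illustrative sketch only; the device path is a placeholder):
 *
 *	char *devid = path_to_devid("/dev/dsk/c1t0d0s0");
 *	if (devid != NULL) {
 *		char *path = devid_to_path(devid);
 *		if (path != NULL) {
 *			(void) printf("%s\n", path);
 *			free(path);
 *		}
 *		devid_str_free(devid);
 *	}
 */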

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			/*
			 * Strip the trailing slice name for whole disks, so
			 * that e.g. "c0t0d0s0" is displayed as "c0t0d0".
			 */
			char *tmp = zfs_strdup(hdl, path);
			if (tmp == NULL)
				return (NULL);
			tmp[strlen(path) - 2] = '\0';
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}
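
/*
 * Example use of zpool_vdev_name() (an illustrative sketch only): print the
 * display name of each top-level vdev in an open pool's configuration.
 *
 *	nvlist_t *config = zpool_get_config(zhp, NULL);
 *	nvlist_t *nvroot, **child;
 *	uint_t c, children;
 *
 *	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 *	    &nvroot) == 0);
 *	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &child, &children) == 0);
 *	for (c = 0; c < children; c++) {
 *		char *name = zpool_vdev_name(hdl, zhp, child[c]);
 *		(void) printf("%s\n", name);
 *		free(name);
 *	}
 */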

static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.  For
	 * example, if the buffer holds 10 slots and 4 were not filled, the 6
	 * valid bookmarks begin at index 4.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

/*
 * Construct the history string for a subcommand from its argument vector,
 * stopping before any argument that would overflow HIS_MAX_RECORD_LEN.
 */
void
zpool_set_history_str(const char *subcommand, int argc, char **argv,
    char *history_str)
{
	int i;

	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
	for (i = 1; i < argc; i++) {
		if (strlen(history_str) + 1 + strlen(argv[i]) >
		    HIS_MAX_RECORD_LEN)
			break;
		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
	}
}

/*
 * Stage command history for logging.
 */
int
zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
{
	if (history_str == NULL)
		return (EINVAL);

	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
		return (EINVAL);

	if (hdl->libzfs_log_str != NULL)
		free(hdl->libzfs_log_str);

	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
		return (no_memory(hdl));

	return (0);
}
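
/*
 * Example use of the two history helpers above (an illustrative sketch
 * only; argc/argv are assumed to be a subcommand's argument vector):
 *
 *	char history_str[HIS_MAX_RECORD_LEN];
 *
 *	zpool_set_history_str("zpool set", argc, argv, history_str);
 *	(void) zpool_stage_history(hdl, history_str);
 */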

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
static int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/*
		 * Add the record to the array, doubling the array's size
		 * whenever the new count reaches a power of two.
		 */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			nvlist_t **tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
			if (tmp == NULL) {
				nvlist_free(nv);
				(*numrecords)--;
				return (ENOMEM);
			}
			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}
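
/*
 * Worked example of the record format consumed above (illustrative only):
 * each record is a packed nvlist preceded by its length, stored as a
 * little-endian uint64_t.  A 24-byte packed nvlist therefore appears in
 * 'buf' as
 *
 *	offset  0: 18 00 00 00 00 00 00 00	(reclen = 0x18 = 24)
 *	offset  8: 24 bytes of packed nvlist
 *	offset 32: length of the next record, and so on.
 *
 * A record that is only partially present at the end of the buffer is left
 * unprocessed and its byte count is reported through 'leftover'.
 */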

#define	HIS_BUF_LEN	(128*1024)

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;

		/* rewind so the next read starts at the incomplete record */
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
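
/*
 * Example use of zpool_get_history() (an illustrative sketch only;
 * dump_nvlist() here is libnvpair's debugging printer, and any consumer
 * could substitute its own record formatting):
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t numrecords, i;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++)
 *			dump_nvlist(records[i], 8);
 *		nvlist_free(nvhis);
 *	}
 */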

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (u_longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (u_longlong_t)dsobj, (u_longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (u_longlong_t)obj);
	}
	free(mntpnt);
}

#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * Determine where a partition starts on a disk in the current
 * configuration.
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}

/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		if (pool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
	    BACKUP_SLICE);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity.
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI GPT
		 * labels.  Print out a helpful error message directing the
		 * user to manually label the disk and give a specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
	return (0);
}
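
/*
 * Worked example of the slice layout computed above (illustrative only),
 * writing R for EFI_MIN_RESV_SIZE and L for efi_last_u_lba:
 *
 *	slice 0 (V_USR, "zfs"):	p_start = start_block
 *				p_size  = L + 1 - R - start_block
 *	slice 8 (V_RESERVED):	p_start = start_block + p_size of slice 0
 *				p_size  = R
 *
 * so the reserved slice occupies the last R sectors of the usable region.
 */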

static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	char *type;
	nvlist_t **child;
	uint_t children, c;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "vdev type '%s' is not supported"), type);
		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
		return (B_FALSE);
	}
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}
	return (B_TRUE);
}
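
/*
 * For example (illustrative only; the device names are placeholders), the
 * recursion above accepts a plain disk or a mirror of disks but rejects
 * raidz, file, log, and missing vdevs at any depth:
 *
 *	mirror(c1t0d0, c1t1d0)			supported
 *	raidz1(c1t0d0, c1t1d0, c1t2d0)		rejected (raidz)
 *	mirror(c1t0d0, /var/tmp/file)		rejected (file child)
 */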

/*
 * Check if this zvol is allowable for use as a dump device; zero if
 * it is, > 0 if it isn't, < 0 if it isn't a zvol.
 */
int
zvol_check_dump_config(char *arg)
{
	zpool_handle_t *zhp = NULL;
	nvlist_t *config, *nvroot;
	char *p, *volname;
	nvlist_t **top;
	uint_t toplevels;
	libzfs_handle_t *hdl;
	char errbuf[1024];
	char poolname[ZPOOL_MAXNAMELEN];
	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
	int ret = 1;

	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen) != 0) {
		return (-1);
	}

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "dump is not supported on device '%s'"), arg);

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	libzfs_print_on_error(hdl, B_TRUE);

	volname = arg + pathlen;

	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		goto out;
	} else if (p - volname >= ZFS_MAXNAMELEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;
	} else {
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}

	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not open pool '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
		goto out;
	}
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);
	if (toplevels != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "'%s' has multiple top level vdevs"), poolname);
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
		goto out;
	}

	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
		goto out;
	}
	ret = 0;

out:
	if (zhp)
		zpool_close(zhp);
	libzfs_fini(hdl);
	return (ret);
}
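
/*
 * Example use of zvol_check_dump_config() (an illustrative sketch only;
 * the zvol path is a placeholder):
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/tank/dump") == 0)
 *		(void) printf("device is usable for dump\n");
 */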