/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#define	DISK_ROOT	"/dev/dsk"
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}
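/*
 * Illustrative sketch (not part of the library): the grow-and-retry
 * idiom used by zpool_get_all_props() above.  Any ZFS_IOC_* call that
 * fills a caller-supplied nvlist buffer may fail with ENOMEM, in which
 * case the destination buffer is expanded and the ioctl retried; the
 * ioctl name below is a placeholder.
 *
 *	while (ioctl(hdl->libzfs_fd, ZFS_IOC_SOMETHING, &zc) != 0) {
 *		if (errno != ENOMEM ||
 *		    zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
 *			zcmd_free_nvlists(&zc);
 *			return (-1);
 *		}
 *	}
 */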
static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				if (srctype != NULL)
					*srctype = src;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_EXPANDSZ:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

/*
 * Check that the bootfs name carries the same pool name as the pool it
 * is being set on.  Assumes 'bootfs' is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
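/*
 * Usage sketch (illustrative only): fetch a pool's health as a printable
 * string via zpool_get_prop() above.  `g_zfs` is an assumed handle from
 * libzfs_init(); the pool name is hypothetical and error reporting is
 * elided.
 *
 *	zpool_handle_t *zhp;
 *	char health[ZFS_MAXPROPLEN];
 *
 *	if ((zhp = zpool_open(g_zfs, "tank")) != NULL) {
 *		if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
 *		    sizeof (health), NULL) == 0)
 *			(void) printf("tank: %s\n", health);
 *		zpool_close(zhp);
 *	}
 */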
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}


/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value must be a dataset name,
			 * and the dataset must reside in the pool on which
			 * the property is being set.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
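/*
 * Illustrative sketch (assumed, not a contracted interface): the shape
 * of the nvlist a caller hands to zpool_valid_proplist() through
 * zpool_create() or zpool_import_props().  Every value starts life as a
 * string; zprop_parse_value() converts numeric and index properties.
 *
 *	nvlist_t *props = NULL;
 *
 *	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0 &&
 *	    nvlist_add_string(props, "autoexpand", "on") == 0 &&
 *	    nvlist_add_string(props, "comment", "scratch pool") == 0) {
 *		... pass 'props' to zpool_create() ...
 *	}
 *	nvlist_free(props);
 */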
/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}


/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256
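/*
 * Usage sketch (illustrative only): set a single property on an open
 * pool handle.  Both name and value are strings; validation and parsing
 * happen inside zpool_valid_proplist() via zpool_set_prop() above.
 *
 *	if (zpool_set_prop(zhp, "autoexpand", "on") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zhp->zpool_hdl));
 */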
/*
 * Validate the given pool name, optionally reporting an extended error
 * message through 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;

			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}
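/*
 * Illustrative note: zpool_open_canfail() above (and zpool_open() below)
 * differ only in their treatment of faulted pools.  A caller that needs
 * to inspect a faulted pool, e.g. to read its GUID, would use the
 * _canfail variant (sketch; `g_zfs` is an assumed handle):
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open_canfail(g_zfs, "tank")) != NULL) {
 *		if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL)
 *			(void) printf("pool is faulted\n");
 *		zpool_close(zhp);
 *	}
 */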
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	char *altroot;
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));
		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect
			 * which device was the problem device since there's
			 * no reliable way to determine device size from
			 * userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
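/*
 * Usage sketch (illustrative only): create a single-disk pool.  In
 * zpool(1M) the vdev tree comes from make_root_vdev(); the hand-rolled
 * nvlist below only suggests its minimal shape (a real tree carries
 * more pairs, e.g. ZPOOL_CONFIG_WHOLE_DISK), and `g_zfs` is an assumed
 * handle.
 *
 *	nvlist_t *disk, *nvroot;
 *
 *	(void) nvlist_alloc(&disk, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c0t0d0s0");
 *	(void) nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	(void) nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1);
 *
 *	(void) zpool_create(g_zfs, "tank", nvroot, NULL, NULL);
 */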
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;
		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect
			 * which device was the problem device since there's
			 * no reliable way to determine device size from
			 * userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool cannot have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
	return (zpool_export_common(zhp, force, B_FALSE));
}

int
zpool_export_force(zpool_handle_t *zhp)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
}
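/*
 * Usage sketch (illustrative only): try a plain export first, then fall
 * back to a forced export if an active spare (or similar) blocks it:
 *
 *	if (zpool_export(zhp, B_FALSE) != 0 &&
 *	    zpool_export(zhp, B_TRUE) != 0)
 *		(void) fprintf(stderr, "export of '%s' failed\n",
 *		    zpool_get_name(zhp));
 */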
"Would discard" : "Discarded", 1280 (loss + 30) / 60); 1281 (void) printf(dgettext(TEXT_DOMAIN, 1282 "minutes of transactions.\n")); 1283 } else if (loss > 0) { 1284 (void) printf(dgettext(TEXT_DOMAIN, 1285 "%s approximately %lld "), 1286 dryrun ? "Would discard" : "Discarded", loss); 1287 (void) printf(dgettext(TEXT_DOMAIN, 1288 "seconds of transactions.\n")); 1289 } 1290 } 1291 } 1292 1293 void 1294 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1295 nvlist_t *config) 1296 { 1297 nvlist_t *nv = NULL; 1298 int64_t loss = -1; 1299 uint64_t edata = UINT64_MAX; 1300 uint64_t rewindto; 1301 struct tm t; 1302 char timestr[128]; 1303 1304 if (!hdl->libzfs_printerr) 1305 return; 1306 1307 if (reason >= 0) 1308 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1309 else 1310 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1311 1312 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1313 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1314 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1315 goto no_info; 1316 1317 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1318 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1319 &edata); 1320 1321 (void) printf(dgettext(TEXT_DOMAIN, 1322 "Recovery is possible, but will result in some data loss.\n")); 1323 1324 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1325 strftime(timestr, 128, 0, &t) != 0) { 1326 (void) printf(dgettext(TEXT_DOMAIN, 1327 "\tReturning the pool to its state as of %s\n" 1328 "\tshould correct the problem. "), 1329 timestr); 1330 } else { 1331 (void) printf(dgettext(TEXT_DOMAIN, 1332 "\tReverting the pool to an earlier state " 1333 "should correct the problem.\n\t")); 1334 } 1335 1336 if (loss > 120) { 1337 (void) printf(dgettext(TEXT_DOMAIN, 1338 "Approximately %lld minutes of data\n" 1339 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1340 } else if (loss > 0) { 1341 (void) printf(dgettext(TEXT_DOMAIN, 1342 "Approximately %lld seconds of data\n" 1343 "\tmust be discarded, irreversibly. "), loss); 1344 } 1345 if (edata != 0 && edata != UINT64_MAX) { 1346 if (edata == 1) { 1347 (void) printf(dgettext(TEXT_DOMAIN, 1348 "After rewind, at least\n" 1349 "\tone persistent user-data error will remain. ")); 1350 } else { 1351 (void) printf(dgettext(TEXT_DOMAIN, 1352 "After rewind, several\n" 1353 "\tpersistent user-data errors will remain. ")); 1354 } 1355 } 1356 (void) printf(dgettext(TEXT_DOMAIN, 1357 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1358 reason >= 0 ? "clear" : "import", name); 1359 1360 (void) printf(dgettext(TEXT_DOMAIN, 1361 "A scrub of the pool\n" 1362 "\tis strongly recommended after recovery.\n")); 1363 return; 1364 1365 no_info: 1366 (void) printf(dgettext(TEXT_DOMAIN, 1367 "Destroy and re-create the pool from\n\ta backup source.\n")); 1368 } 1369 1370 /* 1371 * zpool_import() is a contracted interface. Should be kept the same 1372 * if possible. 1373 * 1374 * Applications should use zpool_import_props() to import a pool with 1375 * new properties value to be set. 
/*
 * zpool_import() is a contracted interface.  Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	if (props)
		nvlist_free(props);
	return (ret);
}

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}
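/*
 * Usage sketch (illustrative only): import a discovered pool under a new
 * name with an alternate root.  The 'config' nvlist is assumed to come
 * from zpool_find_import() (see libzfs_import.c), and `g_zfs` is an
 * assumed handle.
 *
 *	if (zpool_import(g_zfs, config, "newtank", "/mnt") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(g_zfs));
 */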
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg.
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string.  Users of this are expected to do their own
 * verification of the s# part.
 */
#define	CTD_CHECK(str)	(str && str[0] == 'c' && isdigit(str[1]))
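/*
 * Illustrative examples: "c0t0d0s0" and "c1d0" pass CTD_CHECK(), while
 * names like "emcpower0a" do not.  A caller screening bare device names
 * might use it as:
 *
 *	if (CTD_CHECK(name))
 *		... treat 'name' as a c#t#d# style device ...
 */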
/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 * and the like.
 */
static int
ctd_check_path(char *str) {
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)
				;
		}
		str = tmp + 1;
	}
	return (CTD_CHECK(str));
}

/*
 * Find a vdev that matches the search criteria specified.  We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value.  Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
		 *   "s0" or "s0/old".  The "s0" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				int slen = strlen(srchval);
				int vlen = strlen(val);

				if (slen != vlen - 2)
					break;

				/*
				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.
				 */
				ASSERT(vlen >= 6);

				/*
				 * strings identical except trailing "s0"
				 */
				if (strcmp(&val[vlen - 2], "s0") == 0 &&
				    strncmp(srchval, val, slen) == 0)
					return (nv);

				/*
				 * strings identical except trailing "s0/old"
				 */
				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)
					return (nv);

				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
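/*
 * Illustrative sketch: vdev_to_nvlist_iter() matches on whichever single
 * pair is placed in the search nvlist.  Searching by GUID, for example
 * (variables are assumed to be set up as in the callers above):
 *
 *	nvlist_t *search, *found;
 *
 *	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
 *	found = vdev_to_nvlist_iter(nvroot, search, &avail_spare,
 *	    &l2cache, &log);
 *	nvlist_free(search);
 */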
/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}

nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char buf[MAXPATHLEN];
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}

/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}

/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * A root pool cannot have EFI labeled disks and can only have
	 * a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}

/*
 * Get phys_path for a root pool.
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
	    phypath_size));
}
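/*
 * Usage sketch (illustrative only): fetch the space-separated physical
 * path list for a root pool, e.g. for boot-device consumers:
 *
 *	char physpath[MAXPATHLEN];
 *
 *	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
 *		(void) printf("boot device(s): %s\n", physpath);
 */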

/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
{
	char path[MAXPATHLEN];
	char errbuf[1024];
	int fd, error;
	int (*_efi_use_whole_disk)(int);

	/* fill in errbuf before it can be handed to zfs_error() */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot relabel '%s'"), name);

	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
	    "efi_use_whole_disk")) == NULL)
		return (-1);

	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left. If so, we simply
	 * ignore that error and continue on.
	 */
	error = _efi_use_whole_disk(fd);
	(void) close(fd);
	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));
		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}
	return (0);
}

/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		char *pathname = NULL;
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &pathname) == 0);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			pathname += strlen(DISK_ROOT) + 1;
			(void) zpool_relabel_disk(hdl, pathname);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	*newstate = zc.zc_cookie;
	return (0);
}
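
/*
 * Illustrative sketch (not compiled): onlining a device and requesting
 * expansion into any newly available space.  The device name is
 * hypothetical.
 */
#if 0
static int
example_online_expand(zpool_handle_t *zhp)
{
	vdev_state_t newstate;
	int error;

	error = zpool_vdev_online(zhp, "c0t1d0", ZFS_ONLINE_EXPAND,
	    &newstate);
	if (error == 0 && newstate != VDEV_STATE_HEALTHY)
		(void) printf("device online but not healthy\n");
	return (error);
}
#endif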

/*
 * Take the specified vdev offline.
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs.
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
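
/*
 * Illustrative sketch (not compiled): taking a device offline only until
 * the next reboot (the equivalent of 'zpool offline -t').  Device name
 * hypothetical.
 */
#if 0
static int
example_offline_temporary(zpool_handle_t *zhp)
{
	/* B_TRUE requests ZFS_OFFLINE_TEMPORARY semantics */
	return (zpool_vdev_offline(zhp, "c0t1d0", B_TRUE));
}
#endif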

/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;
	zc.zc_obj = aux;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
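
/*
 * Illustrative sketch (not compiled): replacing one disk with another.
 * The nvroot built here approximates what the zpool(1M) command would
 * construct for a single new disk; the device names are hypothetical and
 * labeling/whole-disk handling is omitted.
 */
#if 0
static int
example_replace(zpool_handle_t *zhp)
{
	nvlist_t *nvroot, *disk;
	int err;

	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
	    "/dev/dsk/c0t2d0s0") == 0);
	verify(nvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 0) == 0);

	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0);

	/* non-zero 'replacing' means c0t2d0 takes over from c0t1d0 */
	err = zpool_vdev_attach(zhp, "c0t1d0", "c0t2d0s0", nvroot, 1);

	nvlist_free(disk);
	nvlist_free(nvroot);
	return (err);
}
#endif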

/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], B_FALSE);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], B_FALSE);
			boolean_t result = (strcmp(mpath, spath) == 0);

			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}

/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		if (zc_props)
			nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	if (zc_props)
		nvlist_free(zc_props);
	if (newconfig)
		nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}
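
/*
 * Illustrative sketch (not compiled): a dry-run split of a mirrored pool.
 * With no explicit device list, the last disk of each mirror is chosen;
 * flags.dryrun leaves the pool untouched and hands back the would-be vdev
 * tree in 'newroot' for the caller to inspect and free.  The new pool
 * name is hypothetical.
 */
#if 0
static int
example_split_dryrun(zpool_handle_t *zhp)
{
	splitflags_t flags = { 0 };
	nvlist_t *newroot = NULL;
	int err;

	flags.dryrun = 1;
	err = zpool_vdev_split(zhp, "tanksplit", &newroot, NULL, flags);
	if (err == 0 && newroot != NULL) {
		nvlist_print(stdout, newroot);	/* from libnvpair */
		nvlist_free(newroot);
	}
	return (err);
}
#endif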

/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	/*
	 * XXX - this should just go away.
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, top-level, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = { 0 };

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Reopen the pool.
 */
int
zpool_reopen(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
	    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
		return (0);
	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	if ((path = strdup(list[0].devname)) == NULL) {
		/* don't leak the name list when the copy fails */
		devid_free_nmlist(list);
		return (NULL);
	}

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
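
/*
 * Illustrative sketch (not compiled): the devid round trip used below by
 * zpool_vdev_name() to detect devices that have moved to a new location.
 * The device path is hypothetical.
 */
#if 0
static void
example_devid_roundtrip(void)
{
	char *devid, *path;

	if ((devid = path_to_devid("/dev/dsk/c0t1d0s0")) != NULL) {
		/* a differing path here means the disk was moved */
		if ((path = devid_to_path(devid)) != NULL) {
			(void) printf("devid resolves to %s\n", path);
			free(path);
		}
		devid_str_free(devid);
	}
}
#endif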

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.
 * We ignore any failure here, since a common case is for an unprivileged user
 * to type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a
 * path, we use that, stripping off any leading "/dev/dsk/"; if not, we use
 * the type.  We also check if this is a whole disk, in which case we strip
 * off the trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this,
 * we first do a path -> devid translation (which is fast for the common
 * case).  If the devid matches, we're done.  If not, we do a reverse
 * devid -> path translation and issue the appropriate ioctl() to update the
 * path of the vdev.  If 'zhp' is NULL, then this is an exported pool, and we
 * don't need to do any of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			int pathlen = strlen(path);
			char *tmp = zfs_strdup(hdl, path);

			/*
			 * If it starts with c#, and ends with "s0", chop
			 * the "s0" off, or if it ends with "s0/old", remove
			 * the "s0" from the middle.
			 */
			if (CTD_CHECK(tmp)) {
				if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
					tmp[pathlen - 2] = '\0';
				} else if (pathlen > 6 &&
				    strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
					(void) strcpy(&tmp[pathlen - 6],
					    "/old");
				}
			}
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (verbose) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
			    (u_longlong_t)id);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}
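
/*
 * Illustrative sketch (not compiled): printing the display name of each
 * top-level vdev, the same names 'zpool status' and 'zpool iostat' show.
 */
#if 0
static void
example_print_vdev_names(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	nvlist_t *config, *nvroot, **child;
	uint_t c, children;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);

	for (c = 0; c < children; c++) {
		char *name = zpool_vdev_name(hdl, zhp, child[c], B_TRUE);

		(void) printf("%s\n", name);
		free(name);
	}
}
#endif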

static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of
	 * errors has increased, allocate more space and continue until we get
	 * the entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
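
/*
 * Illustrative sketch (not compiled): walking the uniquified error list
 * and resolving each entry to a path with zpool_obj_to_path() (defined
 * later in this file).
 */
#if 0
static void
example_print_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;

	if (zpool_get_errlog(zhp, &nverrlist) != 0)
		return;

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;
		char pathname[MAXPATHLEN * 2];

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);
		zpool_obj_to_path(zhp, dsobj, obj, pathname,
		    sizeof (pathname));
		(void) printf("%s\n", pathname);
	}
	nvlist_free(nverrlist);
}
#endif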

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

void
zpool_set_history_str(const char *subcommand, int argc, char **argv,
    char *history_str)
{
	int i;

	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
	for (i = 1; i < argc; i++) {
		if (strlen(history_str) + 1 + strlen(argv[i]) >
		    HIS_MAX_RECORD_LEN)
			break;
		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
	}
}

/*
 * Stage command history for logging.
 */
int
zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
{
	if (history_str == NULL)
		return (EINVAL);

	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
		return (EINVAL);

	if (hdl->libzfs_log_str != NULL)
		free(hdl->libzfs_log_str);

	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
		return (no_memory(hdl));

	return (0);
}
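
/*
 * Illustrative sketch (not compiled): how a command-line consumer might
 * stage its invocation for the pool history log before issuing ioctls.
 * The subcommand name is hypothetical.
 */
#if 0
static int
example_stage_history(libzfs_handle_t *hdl, int argc, char **argv)
{
	char history_str[HIS_MAX_RECORD_LEN];

	/* builds e.g. "create -f tank mirror c0t0d0 c0t1d0" from argv */
	zpool_set_history_str("create", argc, argv, history_str);
	return (zpool_stage_history(hdl, history_str));
}
#endif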

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			nvlist_t **tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));

			if (tmp == NULL) {
				/* back out the count bump and drop 'nv' */
				(*numrecords)--;
				nvlist_free(nv);
				return (ENOMEM);
			}
			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}

#define	HIS_BUF_LEN	(128*1024)

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
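
/*
 * Illustrative sketch (not compiled): consuming the unpacked history
 * records.  Each record is an nvlist; ZPOOL_HIST_CMD holds the logged
 * command string for user-initiated events.
 */
#if 0
static void
example_print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis, **records;
	uint_t numrecords, i;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return;

	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);
	for (i = 0; i < numrecords; i++) {
		char *cmd;

		if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%s\n", cmd);
	}
	nvlist_free(nvhis);
}
#endif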

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    dsobj, obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
	}
	free(mntpnt);
}

/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * Determine where a partition starts on a disk in the current
 * configuration.
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}

/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		if (zpool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
	    BACKUP_SLICE);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity.
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI GPT
		 * labels.  Print out a helpful error message directing the
		 * user to manually label the disk and give a specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
	return (0);
}
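
/*
 * Illustrative sketch (not compiled): writing an EFI label to a fresh
 * disk before adding it to a pool; 'zhp' may be NULL when the disk is
 * destined for a brand-new pool.  The device name is hypothetical.
 */
#if 0
static int
example_label(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	/* short name only; RDISK_ROOT and the backup slice are appended */
	return (zpool_label_disk(hdl, zhp, "c0t1d0"));
}
#endif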

static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	char *type;
	nvlist_t **child;
	uint_t children, c;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "vdev type '%s' is not supported"), type);
		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
		return (B_FALSE);
	}
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}
	return (B_TRUE);
}

/*
 * Check if this zvol is allowable for use as a dump device; zero if
 * it is, > 0 if it isn't, < 0 if it isn't a zvol.
 */
int
zvol_check_dump_config(char *arg)
{
	zpool_handle_t *zhp = NULL;
	nvlist_t *config, *nvroot;
	char *p, *volname;
	nvlist_t **top;
	uint_t toplevels;
	libzfs_handle_t *hdl;
	char errbuf[1024];
	char poolname[ZPOOL_MAXNAMELEN];
	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
	int ret = 1;

	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
		return (-1);
	}

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "dump is not supported on device '%s'"), arg);

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	libzfs_print_on_error(hdl, B_TRUE);

	volname = arg + pathlen;

	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		goto out;
	} else if (p - volname >= ZFS_MAXNAMELEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;
	} else {
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}

	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not open pool '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
		goto out;
	}
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);
	if (toplevels != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "'%s' has multiple top level vdevs"), poolname);
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
		goto out;
	}

	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
		goto out;
	}

	ret = 0;

out:
	if (zhp)
		zpool_close(zhp);
	libzfs_fini(hdl);
	return (ret);
}
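
/*
 * Illustrative sketch (not compiled): how dumpadm-style code might vet a
 * candidate dump device path.  The zvol path is hypothetical.
 */
#if 0
static boolean_t
example_dump_ok(void)
{
	/* 0 == usable, > 0 == unsuitable zvol, < 0 == not a zvol at all */
	return (zvol_check_dump_config("/dev/zvol/dsk/tank/dump") == 0);
}
#endif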