1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 25 * Copyright (c) 2011 by Delphix. All rights reserved. 26 */ 27 28 #include <ctype.h> 29 #include <errno.h> 30 #include <devid.h> 31 #include <fcntl.h> 32 #include <libintl.h> 33 #include <stdio.h> 34 #include <stdlib.h> 35 #include <strings.h> 36 #include <unistd.h> 37 #include <sys/efi_partition.h> 38 #include <sys/vtoc.h> 39 #include <sys/zfs_ioctl.h> 40 #include <dlfcn.h> 41 42 #include "zfs_namecheck.h" 43 #include "zfs_prop.h" 44 #include "libzfs_impl.h" 45 #include "zfs_comutil.h" 46 47 static int read_efi_label(nvlist_t *config, diskaddr_t *sb); 48 49 #define DISK_ROOT "/dev/dsk" 50 #define RDISK_ROOT "/dev/rdsk" 51 #define BACKUP_SLICE "s2" 52 53 typedef struct prop_flags { 54 int create:1; /* Validate property on creation */ 55 int import:1; /* Validate property on import */ 56 } prop_flags_t; 57 58 /* 59 * ==================================================================== 60 * zpool property functions 61 * ==================================================================== 62 */ 63 64 static int 65 zpool_get_all_props(zpool_handle_t *zhp) 66 { 67 zfs_cmd_t zc = { 0 }; 68 libzfs_handle_t *hdl = zhp->zpool_hdl; 69 70 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 71 72 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 73 return (-1); 74 75 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 76 if (errno == ENOMEM) { 77 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 78 zcmd_free_nvlists(&zc); 79 return (-1); 80 } 81 } else { 82 zcmd_free_nvlists(&zc); 83 return (-1); 84 } 85 } 86 87 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 88 zcmd_free_nvlists(&zc); 89 return (-1); 90 } 91 92 zcmd_free_nvlists(&zc); 93 94 return (0); 95 } 96 97 static int 98 zpool_props_refresh(zpool_handle_t *zhp) 99 { 100 nvlist_t *old_props; 101 102 old_props = zhp->zpool_props; 103 104 if (zpool_get_all_props(zhp) != 0) 105 return (-1); 106 107 nvlist_free(old_props); 108 return (0); 109 } 110 111 static char * 112 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 113 zprop_source_t *src) 114 { 115 nvlist_t *nv, *nvl; 116 uint64_t ival; 117 char *value; 118 zprop_source_t source; 119 120 nvl = zhp->zpool_props; 121 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 122 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 123 source = ival; 124 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 125 } else { 126 source = ZPROP_SRC_DEFAULT; 127 if ((value = (char *)zpool_prop_default_string(prop)) == NULL) 128 value = "-"; 129 } 130 131 if (src) 132 *src = source; 133 134 return (value); 135 } 136 137 uint64_t 138 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 139 { 140 nvlist_t *nv, *nvl; 141 uint64_t value; 142 zprop_source_t source; 143 144 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 145 /* 146 * zpool_get_all_props() has most likely failed because 147 * the pool is faulted, but if all we need is the top level 148 * vdev's guid then get it from the zhp config nvlist. 149 */ 150 if ((prop == ZPOOL_PROP_GUID) && 151 (nvlist_lookup_nvlist(zhp->zpool_config, 152 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 153 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 154 == 0)) { 155 return (value); 156 } 157 return (zpool_prop_default_numeric(prop)); 158 } 159 160 nvl = zhp->zpool_props; 161 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 162 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 163 source = value; 164 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 165 } else { 166 source = ZPROP_SRC_DEFAULT; 167 value = zpool_prop_default_numeric(prop); 168 } 169 170 if (src) 171 *src = source; 172 173 return (value); 174 } 175 176 /* 177 * Map VDEV STATE to printed strings. 178 */ 179 char * 180 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 181 { 182 switch (state) { 183 case VDEV_STATE_CLOSED: 184 case VDEV_STATE_OFFLINE: 185 return (gettext("OFFLINE")); 186 case VDEV_STATE_REMOVED: 187 return (gettext("REMOVED")); 188 case VDEV_STATE_CANT_OPEN: 189 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 190 return (gettext("FAULTED")); 191 else if (aux == VDEV_AUX_SPLIT_POOL) 192 return (gettext("SPLIT")); 193 else 194 return (gettext("UNAVAIL")); 195 case VDEV_STATE_FAULTED: 196 return (gettext("FAULTED")); 197 case VDEV_STATE_DEGRADED: 198 return (gettext("DEGRADED")); 199 case VDEV_STATE_HEALTHY: 200 return (gettext("ONLINE")); 201 } 202 203 return (gettext("UNKNOWN")); 204 } 205 206 /* 207 * Get a zpool property value for 'prop' and return the value in 208 * a pre-allocated buffer. 209 */ 210 int 211 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 212 zprop_source_t *srctype) 213 { 214 uint64_t intval; 215 const char *strval; 216 zprop_source_t src = ZPROP_SRC_NONE; 217 nvlist_t *nvroot; 218 vdev_stat_t *vs; 219 uint_t vsc; 220 221 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 222 switch (prop) { 223 case ZPOOL_PROP_NAME: 224 (void) strlcpy(buf, zpool_get_name(zhp), len); 225 break; 226 227 case ZPOOL_PROP_HEALTH: 228 (void) strlcpy(buf, "FAULTED", len); 229 break; 230 231 case ZPOOL_PROP_GUID: 232 intval = zpool_get_prop_int(zhp, prop, &src); 233 (void) snprintf(buf, len, "%llu", intval); 234 break; 235 236 case ZPOOL_PROP_ALTROOT: 237 case ZPOOL_PROP_CACHEFILE: 238 case ZPOOL_PROP_COMMENT: 239 if (zhp->zpool_props != NULL || 240 zpool_get_all_props(zhp) == 0) { 241 (void) strlcpy(buf, 242 zpool_get_prop_string(zhp, prop, &src), 243 len); 244 if (srctype != NULL) 245 *srctype = src; 246 return (0); 247 } 248 /* FALLTHROUGH */ 249 default: 250 (void) strlcpy(buf, "-", len); 251 break; 252 } 253 254 if (srctype != NULL) 255 *srctype = src; 256 return (0); 257 } 258 259 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 260 prop != ZPOOL_PROP_NAME) 261 return (-1); 262 263 switch (zpool_prop_get_type(prop)) { 264 case PROP_TYPE_STRING: 265 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 266 len); 267 break; 268 269 case PROP_TYPE_NUMBER: 270 intval = zpool_get_prop_int(zhp, prop, &src); 271 272 switch (prop) { 273 case ZPOOL_PROP_SIZE: 274 case ZPOOL_PROP_ALLOCATED: 275 case ZPOOL_PROP_FREE: 276 (void) zfs_nicenum(intval, buf, len); 277 break; 278 279 case ZPOOL_PROP_CAPACITY: 280 (void) snprintf(buf, len, "%llu%%", 281 (u_longlong_t)intval); 282 break; 283 284 case ZPOOL_PROP_DEDUPRATIO: 285 (void) snprintf(buf, len, "%llu.%02llux", 286 (u_longlong_t)(intval / 100), 287 (u_longlong_t)(intval % 100)); 288 break; 289 290 case ZPOOL_PROP_HEALTH: 291 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 292 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 293 verify(nvlist_lookup_uint64_array(nvroot, 294 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 295 == 0); 296 297 (void) strlcpy(buf, zpool_state_to_name(intval, 298 vs->vs_aux), len); 299 break; 300 default: 301 (void) snprintf(buf, len, "%llu", intval); 302 } 303 break; 304 305 case PROP_TYPE_INDEX: 306 intval = zpool_get_prop_int(zhp, prop, &src); 307 if (zpool_prop_index_to_string(prop, intval, &strval) 308 != 0) 309 return (-1); 310 (void) strlcpy(buf, strval, len); 311 break; 312 313 default: 314 abort(); 315 } 316 317 if (srctype) 318 *srctype = src; 319 320 return (0); 321 } 322 323 /* 324 * Check if the bootfs name has the same pool name as it is set to. 325 * Assuming bootfs is a valid dataset name. 326 */ 327 static boolean_t 328 bootfs_name_valid(const char *pool, char *bootfs) 329 { 330 int len = strlen(pool); 331 332 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 333 return (B_FALSE); 334 335 if (strncmp(pool, bootfs, len) == 0 && 336 (bootfs[len] == '/' || bootfs[len] == '\0')) 337 return (B_TRUE); 338 339 return (B_FALSE); 340 } 341 342 /* 343 * Inspect the configuration to determine if any of the devices contain 344 * an EFI label. 345 */ 346 static boolean_t 347 pool_uses_efi(nvlist_t *config) 348 { 349 nvlist_t **child; 350 uint_t c, children; 351 352 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 353 &child, &children) != 0) 354 return (read_efi_label(config, NULL) >= 0); 355 356 for (c = 0; c < children; c++) { 357 if (pool_uses_efi(child[c])) 358 return (B_TRUE); 359 } 360 return (B_FALSE); 361 } 362 363 static boolean_t 364 pool_is_bootable(zpool_handle_t *zhp) 365 { 366 char bootfs[ZPOOL_MAXNAMELEN]; 367 368 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 369 sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-", 370 sizeof (bootfs)) != 0); 371 } 372 373 374 /* 375 * Given an nvlist of zpool properties to be set, validate that they are 376 * correct, and parse any numeric properties (index, boolean, etc) if they are 377 * specified as strings. 378 */ 379 static nvlist_t * 380 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 381 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 382 { 383 nvpair_t *elem; 384 nvlist_t *retprops; 385 zpool_prop_t prop; 386 char *strval; 387 uint64_t intval; 388 char *slash, *check; 389 struct stat64 statbuf; 390 zpool_handle_t *zhp; 391 nvlist_t *nvroot; 392 393 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 394 (void) no_memory(hdl); 395 return (NULL); 396 } 397 398 elem = NULL; 399 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 400 const char *propname = nvpair_name(elem); 401 402 /* 403 * Make sure this property is valid and applies to this type. 404 */ 405 if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) { 406 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 407 "invalid property '%s'"), propname); 408 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 409 goto error; 410 } 411 412 if (zpool_prop_readonly(prop)) { 413 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 414 "is readonly"), propname); 415 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 416 goto error; 417 } 418 419 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 420 &strval, &intval, errbuf) != 0) 421 goto error; 422 423 /* 424 * Perform additional checking for specific properties. 425 */ 426 switch (prop) { 427 case ZPOOL_PROP_VERSION: 428 if (intval < version || intval > SPA_VERSION) { 429 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 430 "property '%s' number %d is invalid."), 431 propname, intval); 432 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 433 goto error; 434 } 435 break; 436 437 case ZPOOL_PROP_BOOTFS: 438 if (flags.create || flags.import) { 439 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 440 "property '%s' cannot be set at creation " 441 "or import time"), propname); 442 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 443 goto error; 444 } 445 446 if (version < SPA_VERSION_BOOTFS) { 447 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 448 "pool must be upgraded to support " 449 "'%s' property"), propname); 450 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 451 goto error; 452 } 453 454 /* 455 * bootfs property value has to be a dataset name and 456 * the dataset has to be in the same pool as it sets to. 457 */ 458 if (strval[0] != '\0' && !bootfs_name_valid(poolname, 459 strval)) { 460 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 461 "is an invalid name"), strval); 462 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 463 goto error; 464 } 465 466 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 467 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 468 "could not open pool '%s'"), poolname); 469 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 470 goto error; 471 } 472 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 473 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 474 475 /* 476 * bootfs property cannot be set on a disk which has 477 * been EFI labeled. 478 */ 479 if (pool_uses_efi(nvroot)) { 480 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 481 "property '%s' not supported on " 482 "EFI labeled devices"), propname); 483 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf); 484 zpool_close(zhp); 485 goto error; 486 } 487 zpool_close(zhp); 488 break; 489 490 case ZPOOL_PROP_ALTROOT: 491 if (!flags.create && !flags.import) { 492 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 493 "property '%s' can only be set during pool " 494 "creation or import"), propname); 495 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 496 goto error; 497 } 498 499 if (strval[0] != '/') { 500 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 501 "bad alternate root '%s'"), strval); 502 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 503 goto error; 504 } 505 break; 506 507 case ZPOOL_PROP_CACHEFILE: 508 if (strval[0] == '\0') 509 break; 510 511 if (strcmp(strval, "none") == 0) 512 break; 513 514 if (strval[0] != '/') { 515 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 516 "property '%s' must be empty, an " 517 "absolute path, or 'none'"), propname); 518 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 519 goto error; 520 } 521 522 slash = strrchr(strval, '/'); 523 524 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 525 strcmp(slash, "/..") == 0) { 526 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 527 "'%s' is not a valid file"), strval); 528 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 529 goto error; 530 } 531 532 *slash = '\0'; 533 534 if (strval[0] != '\0' && 535 (stat64(strval, &statbuf) != 0 || 536 !S_ISDIR(statbuf.st_mode))) { 537 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 538 "'%s' is not a valid directory"), 539 strval); 540 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 541 goto error; 542 } 543 544 *slash = '/'; 545 break; 546 547 case ZPOOL_PROP_COMMENT: 548 for (check = strval; *check != '\0'; check++) { 549 if (!isprint(*check)) { 550 zfs_error_aux(hdl, 551 dgettext(TEXT_DOMAIN, 552 "comment may only have printable " 553 "characters")); 554 (void) zfs_error(hdl, EZFS_BADPROP, 555 errbuf); 556 goto error; 557 } 558 } 559 if (strlen(strval) > ZPROP_MAX_COMMENT) { 560 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 561 "comment must not exceed %d characters"), 562 ZPROP_MAX_COMMENT); 563 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 564 goto error; 565 } 566 break; 567 case ZPOOL_PROP_READONLY: 568 if (!flags.import) { 569 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 570 "property '%s' can only be set at " 571 "import time"), propname); 572 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 573 goto error; 574 } 575 break; 576 } 577 } 578 579 return (retprops); 580 error: 581 nvlist_free(retprops); 582 return (NULL); 583 } 584 585 /* 586 * Set zpool property : propname=propval. 587 */ 588 int 589 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 590 { 591 zfs_cmd_t zc = { 0 }; 592 int ret = -1; 593 char errbuf[1024]; 594 nvlist_t *nvl = NULL; 595 nvlist_t *realprops; 596 uint64_t version; 597 prop_flags_t flags = { 0 }; 598 599 (void) snprintf(errbuf, sizeof (errbuf), 600 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 601 zhp->zpool_name); 602 603 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 604 return (no_memory(zhp->zpool_hdl)); 605 606 if (nvlist_add_string(nvl, propname, propval) != 0) { 607 nvlist_free(nvl); 608 return (no_memory(zhp->zpool_hdl)); 609 } 610 611 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 612 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 613 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 614 nvlist_free(nvl); 615 return (-1); 616 } 617 618 nvlist_free(nvl); 619 nvl = realprops; 620 621 /* 622 * Execute the corresponding ioctl() to set this property. 623 */ 624 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 625 626 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 627 nvlist_free(nvl); 628 return (-1); 629 } 630 631 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 632 633 zcmd_free_nvlists(&zc); 634 nvlist_free(nvl); 635 636 if (ret) 637 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 638 else 639 (void) zpool_props_refresh(zhp); 640 641 return (ret); 642 } 643 644 int 645 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 646 { 647 libzfs_handle_t *hdl = zhp->zpool_hdl; 648 zprop_list_t *entry; 649 char buf[ZFS_MAXPROPLEN]; 650 651 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 652 return (-1); 653 654 for (entry = *plp; entry != NULL; entry = entry->pl_next) { 655 656 if (entry->pl_fixed) 657 continue; 658 659 if (entry->pl_prop != ZPROP_INVAL && 660 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), 661 NULL) == 0) { 662 if (strlen(buf) > entry->pl_width) 663 entry->pl_width = strlen(buf); 664 } 665 } 666 667 return (0); 668 } 669 670 671 /* 672 * Don't start the slice at the default block of 34; many storage 673 * devices will use a stripe width of 128k, so start there instead. 674 */ 675 #define NEW_START_BLOCK 256 676 677 /* 678 * Validate the given pool name, optionally putting an extended error message in 679 * 'buf'. 680 */ 681 boolean_t 682 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 683 { 684 namecheck_err_t why; 685 char what; 686 int ret; 687 688 ret = pool_namecheck(pool, &why, &what); 689 690 /* 691 * The rules for reserved pool names were extended at a later point. 692 * But we need to support users with existing pools that may now be 693 * invalid. So we only check for this expanded set of names during a 694 * create (or import), and only in userland. 695 */ 696 if (ret == 0 && !isopen && 697 (strncmp(pool, "mirror", 6) == 0 || 698 strncmp(pool, "raidz", 5) == 0 || 699 strncmp(pool, "spare", 5) == 0 || 700 strcmp(pool, "log") == 0)) { 701 if (hdl != NULL) 702 zfs_error_aux(hdl, 703 dgettext(TEXT_DOMAIN, "name is reserved")); 704 return (B_FALSE); 705 } 706 707 708 if (ret != 0) { 709 if (hdl != NULL) { 710 switch (why) { 711 case NAME_ERR_TOOLONG: 712 zfs_error_aux(hdl, 713 dgettext(TEXT_DOMAIN, "name is too long")); 714 break; 715 716 case NAME_ERR_INVALCHAR: 717 zfs_error_aux(hdl, 718 dgettext(TEXT_DOMAIN, "invalid character " 719 "'%c' in pool name"), what); 720 break; 721 722 case NAME_ERR_NOLETTER: 723 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 724 "name must begin with a letter")); 725 break; 726 727 case NAME_ERR_RESERVED: 728 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 729 "name is reserved")); 730 break; 731 732 case NAME_ERR_DISKLIKE: 733 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 734 "pool name is reserved")); 735 break; 736 737 case NAME_ERR_LEADING_SLASH: 738 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 739 "leading slash in name")); 740 break; 741 742 case NAME_ERR_EMPTY_COMPONENT: 743 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 744 "empty component in name")); 745 break; 746 747 case NAME_ERR_TRAILING_SLASH: 748 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 749 "trailing slash in name")); 750 break; 751 752 case NAME_ERR_MULTIPLE_AT: 753 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 754 "multiple '@' delimiters in name")); 755 break; 756 757 } 758 } 759 return (B_FALSE); 760 } 761 762 return (B_TRUE); 763 } 764 765 /* 766 * Open a handle to the given pool, even if the pool is currently in the FAULTED 767 * state. 768 */ 769 zpool_handle_t * 770 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 771 { 772 zpool_handle_t *zhp; 773 boolean_t missing; 774 775 /* 776 * Make sure the pool name is valid. 777 */ 778 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 779 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 780 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 781 pool); 782 return (NULL); 783 } 784 785 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 786 return (NULL); 787 788 zhp->zpool_hdl = hdl; 789 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 790 791 if (zpool_refresh_stats(zhp, &missing) != 0) { 792 zpool_close(zhp); 793 return (NULL); 794 } 795 796 if (missing) { 797 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 798 (void) zfs_error_fmt(hdl, EZFS_NOENT, 799 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 800 zpool_close(zhp); 801 return (NULL); 802 } 803 804 return (zhp); 805 } 806 807 /* 808 * Like the above, but silent on error. Used when iterating over pools (because 809 * the configuration cache may be out of date). 810 */ 811 int 812 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 813 { 814 zpool_handle_t *zhp; 815 boolean_t missing; 816 817 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 818 return (-1); 819 820 zhp->zpool_hdl = hdl; 821 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 822 823 if (zpool_refresh_stats(zhp, &missing) != 0) { 824 zpool_close(zhp); 825 return (-1); 826 } 827 828 if (missing) { 829 zpool_close(zhp); 830 *ret = NULL; 831 return (0); 832 } 833 834 *ret = zhp; 835 return (0); 836 } 837 838 /* 839 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 840 * state. 841 */ 842 zpool_handle_t * 843 zpool_open(libzfs_handle_t *hdl, const char *pool) 844 { 845 zpool_handle_t *zhp; 846 847 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 848 return (NULL); 849 850 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 851 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 852 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 853 zpool_close(zhp); 854 return (NULL); 855 } 856 857 return (zhp); 858 } 859 860 /* 861 * Close the handle. Simply frees the memory associated with the handle. 862 */ 863 void 864 zpool_close(zpool_handle_t *zhp) 865 { 866 if (zhp->zpool_config) 867 nvlist_free(zhp->zpool_config); 868 if (zhp->zpool_old_config) 869 nvlist_free(zhp->zpool_old_config); 870 if (zhp->zpool_props) 871 nvlist_free(zhp->zpool_props); 872 free(zhp); 873 } 874 875 /* 876 * Return the name of the pool. 877 */ 878 const char * 879 zpool_get_name(zpool_handle_t *zhp) 880 { 881 return (zhp->zpool_name); 882 } 883 884 885 /* 886 * Return the state of the pool (ACTIVE or UNAVAILABLE) 887 */ 888 int 889 zpool_get_state(zpool_handle_t *zhp) 890 { 891 return (zhp->zpool_state); 892 } 893 894 /* 895 * Create the named pool, using the provided vdev list. It is assumed 896 * that the consumer has already validated the contents of the nvlist, so we 897 * don't have to worry about error semantics. 898 */ 899 int 900 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 901 nvlist_t *props, nvlist_t *fsprops) 902 { 903 zfs_cmd_t zc = { 0 }; 904 nvlist_t *zc_fsprops = NULL; 905 nvlist_t *zc_props = NULL; 906 char msg[1024]; 907 char *altroot; 908 int ret = -1; 909 910 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 911 "cannot create '%s'"), pool); 912 913 if (!zpool_name_valid(hdl, B_FALSE, pool)) 914 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 915 916 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 917 return (-1); 918 919 if (props) { 920 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 921 922 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 923 SPA_VERSION_1, flags, msg)) == NULL) { 924 goto create_failed; 925 } 926 } 927 928 if (fsprops) { 929 uint64_t zoned; 930 char *zonestr; 931 932 zoned = ((nvlist_lookup_string(fsprops, 933 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 934 strcmp(zonestr, "on") == 0); 935 936 if ((zc_fsprops = zfs_valid_proplist(hdl, 937 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) { 938 goto create_failed; 939 } 940 if (!zc_props && 941 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 942 goto create_failed; 943 } 944 if (nvlist_add_nvlist(zc_props, 945 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 946 goto create_failed; 947 } 948 } 949 950 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 951 goto create_failed; 952 953 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 954 955 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 956 957 zcmd_free_nvlists(&zc); 958 nvlist_free(zc_props); 959 nvlist_free(zc_fsprops); 960 961 switch (errno) { 962 case EBUSY: 963 /* 964 * This can happen if the user has specified the same 965 * device multiple times. We can't reliably detect this 966 * until we try to add it and see we already have a 967 * label. 968 */ 969 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 970 "one or more vdevs refer to the same device")); 971 return (zfs_error(hdl, EZFS_BADDEV, msg)); 972 973 case EOVERFLOW: 974 /* 975 * This occurs when one of the devices is below 976 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 977 * device was the problem device since there's no 978 * reliable way to determine device size from userland. 979 */ 980 { 981 char buf[64]; 982 983 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 984 985 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 986 "one or more devices is less than the " 987 "minimum size (%s)"), buf); 988 } 989 return (zfs_error(hdl, EZFS_BADDEV, msg)); 990 991 case ENOSPC: 992 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 993 "one or more devices is out of space")); 994 return (zfs_error(hdl, EZFS_BADDEV, msg)); 995 996 case ENOTBLK: 997 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 998 "cache device must be a disk or disk slice")); 999 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1000 1001 default: 1002 return (zpool_standard_error(hdl, errno, msg)); 1003 } 1004 } 1005 1006 /* 1007 * If this is an alternate root pool, then we automatically set the 1008 * mountpoint of the root dataset to be '/'. 1009 */ 1010 if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT), 1011 &altroot) == 0) { 1012 zfs_handle_t *zhp; 1013 1014 verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL); 1015 verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1016 "/") == 0); 1017 1018 zfs_close(zhp); 1019 } 1020 1021 create_failed: 1022 zcmd_free_nvlists(&zc); 1023 nvlist_free(zc_props); 1024 nvlist_free(zc_fsprops); 1025 return (ret); 1026 } 1027 1028 /* 1029 * Destroy the given pool. It is up to the caller to ensure that there are no 1030 * datasets left in the pool. 1031 */ 1032 int 1033 zpool_destroy(zpool_handle_t *zhp) 1034 { 1035 zfs_cmd_t zc = { 0 }; 1036 zfs_handle_t *zfp = NULL; 1037 libzfs_handle_t *hdl = zhp->zpool_hdl; 1038 char msg[1024]; 1039 1040 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1041 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1042 return (-1); 1043 1044 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1045 1046 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1047 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1048 "cannot destroy '%s'"), zhp->zpool_name); 1049 1050 if (errno == EROFS) { 1051 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1052 "one or more devices is read only")); 1053 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1054 } else { 1055 (void) zpool_standard_error(hdl, errno, msg); 1056 } 1057 1058 if (zfp) 1059 zfs_close(zfp); 1060 return (-1); 1061 } 1062 1063 if (zfp) { 1064 remove_mountpoint(zfp); 1065 zfs_close(zfp); 1066 } 1067 1068 return (0); 1069 } 1070 1071 /* 1072 * Add the given vdevs to the pool. The caller must have already performed the 1073 * necessary verification to ensure that the vdev specification is well-formed. 1074 */ 1075 int 1076 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) 1077 { 1078 zfs_cmd_t zc = { 0 }; 1079 int ret; 1080 libzfs_handle_t *hdl = zhp->zpool_hdl; 1081 char msg[1024]; 1082 nvlist_t **spares, **l2cache; 1083 uint_t nspares, nl2cache; 1084 1085 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1086 "cannot add to '%s'"), zhp->zpool_name); 1087 1088 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1089 SPA_VERSION_SPARES && 1090 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1091 &spares, &nspares) == 0) { 1092 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1093 "upgraded to add hot spares")); 1094 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1095 } 1096 1097 if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot, 1098 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { 1099 uint64_t s; 1100 1101 for (s = 0; s < nspares; s++) { 1102 char *path; 1103 1104 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH, 1105 &path) == 0 && pool_uses_efi(spares[s])) { 1106 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1107 "device '%s' contains an EFI label and " 1108 "cannot be used on root pools."), 1109 zpool_vdev_name(hdl, NULL, spares[s], 1110 B_FALSE)); 1111 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 1112 } 1113 } 1114 } 1115 1116 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1117 SPA_VERSION_L2CACHE && 1118 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1119 &l2cache, &nl2cache) == 0) { 1120 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1121 "upgraded to add cache devices")); 1122 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1123 } 1124 1125 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1126 return (-1); 1127 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1128 1129 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { 1130 switch (errno) { 1131 case EBUSY: 1132 /* 1133 * This can happen if the user has specified the same 1134 * device multiple times. We can't reliably detect this 1135 * until we try to add it and see we already have a 1136 * label. 1137 */ 1138 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1139 "one or more vdevs refer to the same device")); 1140 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1141 break; 1142 1143 case EOVERFLOW: 1144 /* 1145 * This occurrs when one of the devices is below 1146 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1147 * device was the problem device since there's no 1148 * reliable way to determine device size from userland. 1149 */ 1150 { 1151 char buf[64]; 1152 1153 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1154 1155 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1156 "device is less than the minimum " 1157 "size (%s)"), buf); 1158 } 1159 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1160 break; 1161 1162 case ENOTSUP: 1163 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1164 "pool must be upgraded to add these vdevs")); 1165 (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1166 break; 1167 1168 case EDOM: 1169 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1170 "root pool can not have multiple vdevs" 1171 " or separate logs")); 1172 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); 1173 break; 1174 1175 case ENOTBLK: 1176 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1177 "cache device must be a disk or disk slice")); 1178 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1179 break; 1180 1181 default: 1182 (void) zpool_standard_error(hdl, errno, msg); 1183 } 1184 1185 ret = -1; 1186 } else { 1187 ret = 0; 1188 } 1189 1190 zcmd_free_nvlists(&zc); 1191 1192 return (ret); 1193 } 1194 1195 /* 1196 * Exports the pool from the system. The caller must ensure that there are no 1197 * mounted datasets in the pool. 1198 */ 1199 int 1200 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce) 1201 { 1202 zfs_cmd_t zc = { 0 }; 1203 char msg[1024]; 1204 1205 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1206 "cannot export '%s'"), zhp->zpool_name); 1207 1208 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1209 zc.zc_cookie = force; 1210 zc.zc_guid = hardforce; 1211 1212 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1213 switch (errno) { 1214 case EXDEV: 1215 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1216 "use '-f' to override the following errors:\n" 1217 "'%s' has an active shared spare which could be" 1218 " used by other pools once '%s' is exported."), 1219 zhp->zpool_name, zhp->zpool_name); 1220 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1221 msg)); 1222 default: 1223 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1224 msg)); 1225 } 1226 } 1227 1228 return (0); 1229 } 1230 1231 int 1232 zpool_export(zpool_handle_t *zhp, boolean_t force) 1233 { 1234 return (zpool_export_common(zhp, force, B_FALSE)); 1235 } 1236 1237 int 1238 zpool_export_force(zpool_handle_t *zhp) 1239 { 1240 return (zpool_export_common(zhp, B_TRUE, B_TRUE)); 1241 } 1242 1243 static void 1244 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, 1245 nvlist_t *config) 1246 { 1247 nvlist_t *nv = NULL; 1248 uint64_t rewindto; 1249 int64_t loss = -1; 1250 struct tm t; 1251 char timestr[128]; 1252 1253 if (!hdl->libzfs_printerr || config == NULL) 1254 return; 1255 1256 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0) 1257 return; 1258 1259 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1260 return; 1261 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1262 1263 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1264 strftime(timestr, 128, 0, &t) != 0) { 1265 if (dryrun) { 1266 (void) printf(dgettext(TEXT_DOMAIN, 1267 "Would be able to return %s " 1268 "to its state as of %s.\n"), 1269 name, timestr); 1270 } else { 1271 (void) printf(dgettext(TEXT_DOMAIN, 1272 "Pool %s returned to its state as of %s.\n"), 1273 name, timestr); 1274 } 1275 if (loss > 120) { 1276 (void) printf(dgettext(TEXT_DOMAIN, 1277 "%s approximately %lld "), 1278 dryrun ? "Would discard" : "Discarded", 1279 (loss + 30) / 60); 1280 (void) printf(dgettext(TEXT_DOMAIN, 1281 "minutes of transactions.\n")); 1282 } else if (loss > 0) { 1283 (void) printf(dgettext(TEXT_DOMAIN, 1284 "%s approximately %lld "), 1285 dryrun ? "Would discard" : "Discarded", loss); 1286 (void) printf(dgettext(TEXT_DOMAIN, 1287 "seconds of transactions.\n")); 1288 } 1289 } 1290 } 1291 1292 void 1293 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1294 nvlist_t *config) 1295 { 1296 nvlist_t *nv = NULL; 1297 int64_t loss = -1; 1298 uint64_t edata = UINT64_MAX; 1299 uint64_t rewindto; 1300 struct tm t; 1301 char timestr[128]; 1302 1303 if (!hdl->libzfs_printerr) 1304 return; 1305 1306 if (reason >= 0) 1307 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1308 else 1309 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1310 1311 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1312 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1313 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1314 goto no_info; 1315 1316 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1317 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1318 &edata); 1319 1320 (void) printf(dgettext(TEXT_DOMAIN, 1321 "Recovery is possible, but will result in some data loss.\n")); 1322 1323 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1324 strftime(timestr, 128, 0, &t) != 0) { 1325 (void) printf(dgettext(TEXT_DOMAIN, 1326 "\tReturning the pool to its state as of %s\n" 1327 "\tshould correct the problem. "), 1328 timestr); 1329 } else { 1330 (void) printf(dgettext(TEXT_DOMAIN, 1331 "\tReverting the pool to an earlier state " 1332 "should correct the problem.\n\t")); 1333 } 1334 1335 if (loss > 120) { 1336 (void) printf(dgettext(TEXT_DOMAIN, 1337 "Approximately %lld minutes of data\n" 1338 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1339 } else if (loss > 0) { 1340 (void) printf(dgettext(TEXT_DOMAIN, 1341 "Approximately %lld seconds of data\n" 1342 "\tmust be discarded, irreversibly. "), loss); 1343 } 1344 if (edata != 0 && edata != UINT64_MAX) { 1345 if (edata == 1) { 1346 (void) printf(dgettext(TEXT_DOMAIN, 1347 "After rewind, at least\n" 1348 "\tone persistent user-data error will remain. ")); 1349 } else { 1350 (void) printf(dgettext(TEXT_DOMAIN, 1351 "After rewind, several\n" 1352 "\tpersistent user-data errors will remain. ")); 1353 } 1354 } 1355 (void) printf(dgettext(TEXT_DOMAIN, 1356 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1357 reason >= 0 ? "clear" : "import", name); 1358 1359 (void) printf(dgettext(TEXT_DOMAIN, 1360 "A scrub of the pool\n" 1361 "\tis strongly recommended after recovery.\n")); 1362 return; 1363 1364 no_info: 1365 (void) printf(dgettext(TEXT_DOMAIN, 1366 "Destroy and re-create the pool from\n\ta backup source.\n")); 1367 } 1368 1369 /* 1370 * zpool_import() is a contracted interface. Should be kept the same 1371 * if possible. 1372 * 1373 * Applications should use zpool_import_props() to import a pool with 1374 * new properties value to be set. 1375 */ 1376 int 1377 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1378 char *altroot) 1379 { 1380 nvlist_t *props = NULL; 1381 int ret; 1382 1383 if (altroot != NULL) { 1384 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1385 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1386 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1387 newname)); 1388 } 1389 1390 if (nvlist_add_string(props, 1391 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1392 nvlist_add_string(props, 1393 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1394 nvlist_free(props); 1395 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1396 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1397 newname)); 1398 } 1399 } 1400 1401 ret = zpool_import_props(hdl, config, newname, props, 1402 ZFS_IMPORT_NORMAL); 1403 if (props) 1404 nvlist_free(props); 1405 return (ret); 1406 } 1407 1408 static void 1409 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1410 int indent) 1411 { 1412 nvlist_t **child; 1413 uint_t c, children; 1414 char *vname; 1415 uint64_t is_log = 0; 1416 1417 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1418 &is_log); 1419 1420 if (name != NULL) 1421 (void) printf("\t%*s%s%s\n", indent, "", name, 1422 is_log ? " [log]" : ""); 1423 1424 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1425 &child, &children) != 0) 1426 return; 1427 1428 for (c = 0; c < children; c++) { 1429 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1430 print_vdev_tree(hdl, vname, child[c], indent + 2); 1431 free(vname); 1432 } 1433 } 1434 1435 /* 1436 * Import the given pool using the known configuration and a list of 1437 * properties to be set. The configuration should have come from 1438 * zpool_find_import(). The 'newname' parameters control whether the pool 1439 * is imported with a different name. 1440 */ 1441 int 1442 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1443 nvlist_t *props, int flags) 1444 { 1445 zfs_cmd_t zc = { 0 }; 1446 zpool_rewind_policy_t policy; 1447 nvlist_t *nv = NULL; 1448 nvlist_t *nvinfo = NULL; 1449 nvlist_t *missing = NULL; 1450 char *thename; 1451 char *origname; 1452 int ret; 1453 int error = 0; 1454 char errbuf[1024]; 1455 1456 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1457 &origname) == 0); 1458 1459 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1460 "cannot import pool '%s'"), origname); 1461 1462 if (newname != NULL) { 1463 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1464 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1465 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1466 newname)); 1467 thename = (char *)newname; 1468 } else { 1469 thename = origname; 1470 } 1471 1472 if (props) { 1473 uint64_t version; 1474 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1475 1476 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1477 &version) == 0); 1478 1479 if ((props = zpool_valid_proplist(hdl, origname, 1480 props, version, flags, errbuf)) == NULL) { 1481 return (-1); 1482 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1483 nvlist_free(props); 1484 return (-1); 1485 } 1486 } 1487 1488 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1489 1490 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1491 &zc.zc_guid) == 0); 1492 1493 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1494 nvlist_free(props); 1495 return (-1); 1496 } 1497 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1498 nvlist_free(props); 1499 return (-1); 1500 } 1501 1502 zc.zc_cookie = flags; 1503 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1504 errno == ENOMEM) { 1505 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1506 zcmd_free_nvlists(&zc); 1507 return (-1); 1508 } 1509 } 1510 if (ret != 0) 1511 error = errno; 1512 1513 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1514 zpool_get_rewind_policy(config, &policy); 1515 1516 if (error) { 1517 char desc[1024]; 1518 1519 /* 1520 * Dry-run failed, but we print out what success 1521 * looks like if we found a best txg 1522 */ 1523 if (policy.zrp_request & ZPOOL_TRY_REWIND) { 1524 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1525 B_TRUE, nv); 1526 nvlist_free(nv); 1527 return (-1); 1528 } 1529 1530 if (newname == NULL) 1531 (void) snprintf(desc, sizeof (desc), 1532 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1533 thename); 1534 else 1535 (void) snprintf(desc, sizeof (desc), 1536 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1537 origname, thename); 1538 1539 switch (error) { 1540 case ENOTSUP: 1541 /* 1542 * Unsupported version. 1543 */ 1544 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1545 break; 1546 1547 case EINVAL: 1548 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1549 break; 1550 1551 case EROFS: 1552 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1553 "one or more devices is read only")); 1554 (void) zfs_error(hdl, EZFS_BADDEV, desc); 1555 break; 1556 1557 case ENXIO: 1558 if (nv && nvlist_lookup_nvlist(nv, 1559 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1560 nvlist_lookup_nvlist(nvinfo, 1561 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1562 (void) printf(dgettext(TEXT_DOMAIN, 1563 "The devices below are missing, use " 1564 "'-m' to import the pool anyway:\n")); 1565 print_vdev_tree(hdl, NULL, missing, 2); 1566 (void) printf("\n"); 1567 } 1568 (void) zpool_standard_error(hdl, error, desc); 1569 break; 1570 1571 case EEXIST: 1572 (void) zpool_standard_error(hdl, error, desc); 1573 break; 1574 1575 default: 1576 (void) zpool_standard_error(hdl, error, desc); 1577 zpool_explain_recover(hdl, 1578 newname ? origname : thename, -error, nv); 1579 break; 1580 } 1581 1582 nvlist_free(nv); 1583 ret = -1; 1584 } else { 1585 zpool_handle_t *zhp; 1586 1587 /* 1588 * This should never fail, but play it safe anyway. 1589 */ 1590 if (zpool_open_silent(hdl, thename, &zhp) != 0) 1591 ret = -1; 1592 else if (zhp != NULL) 1593 zpool_close(zhp); 1594 if (policy.zrp_request & 1595 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 1596 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1597 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv); 1598 } 1599 nvlist_free(nv); 1600 return (0); 1601 } 1602 1603 zcmd_free_nvlists(&zc); 1604 nvlist_free(props); 1605 1606 return (ret); 1607 } 1608 1609 /* 1610 * Scan the pool. 1611 */ 1612 int 1613 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func) 1614 { 1615 zfs_cmd_t zc = { 0 }; 1616 char msg[1024]; 1617 libzfs_handle_t *hdl = zhp->zpool_hdl; 1618 1619 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1620 zc.zc_cookie = func; 1621 1622 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 || 1623 (errno == ENOENT && func != POOL_SCAN_NONE)) 1624 return (0); 1625 1626 if (func == POOL_SCAN_SCRUB) { 1627 (void) snprintf(msg, sizeof (msg), 1628 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name); 1629 } else if (func == POOL_SCAN_NONE) { 1630 (void) snprintf(msg, sizeof (msg), 1631 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 1632 zc.zc_name); 1633 } else { 1634 assert(!"unexpected result"); 1635 } 1636 1637 if (errno == EBUSY) { 1638 nvlist_t *nvroot; 1639 pool_scan_stat_t *ps = NULL; 1640 uint_t psc; 1641 1642 verify(nvlist_lookup_nvlist(zhp->zpool_config, 1643 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1644 (void) nvlist_lookup_uint64_array(nvroot, 1645 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 1646 if (ps && ps->pss_func == POOL_SCAN_SCRUB) 1647 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 1648 else 1649 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 1650 } else if (errno == ENOENT) { 1651 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 1652 } else { 1653 return (zpool_standard_error(hdl, errno, msg)); 1654 } 1655 } 1656 1657 /* 1658 * This provides a very minimal check whether a given string is likely a 1659 * c#t#d# style string. Users of this are expected to do their own 1660 * verification of the s# part. 1661 */ 1662 #define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 1663 1664 /* 1665 * More elaborate version for ones which may start with "/dev/dsk/" 1666 * and the like. 1667 */ 1668 static int 1669 ctd_check_path(char *str) { 1670 /* 1671 * If it starts with a slash, check the last component. 1672 */ 1673 if (str && str[0] == '/') { 1674 char *tmp = strrchr(str, '/'); 1675 1676 /* 1677 * If it ends in "/old", check the second-to-last 1678 * component of the string instead. 1679 */ 1680 if (tmp != str && strcmp(tmp, "/old") == 0) { 1681 for (tmp--; *tmp != '/'; tmp--) 1682 ; 1683 } 1684 str = tmp + 1; 1685 } 1686 return (CTD_CHECK(str)); 1687 } 1688 1689 /* 1690 * Find a vdev that matches the search criteria specified. We use the 1691 * the nvpair name to determine how we should look for the device. 1692 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 1693 * spare; but FALSE if its an INUSE spare. 1694 */ 1695 static nvlist_t * 1696 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare, 1697 boolean_t *l2cache, boolean_t *log) 1698 { 1699 uint_t c, children; 1700 nvlist_t **child; 1701 nvlist_t *ret; 1702 uint64_t is_log; 1703 char *srchkey; 1704 nvpair_t *pair = nvlist_next_nvpair(search, NULL); 1705 1706 /* Nothing to look for */ 1707 if (search == NULL || pair == NULL) 1708 return (NULL); 1709 1710 /* Obtain the key we will use to search */ 1711 srchkey = nvpair_name(pair); 1712 1713 switch (nvpair_type(pair)) { 1714 case DATA_TYPE_UINT64: 1715 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) { 1716 uint64_t srchval, theguid; 1717 1718 verify(nvpair_value_uint64(pair, &srchval) == 0); 1719 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 1720 &theguid) == 0); 1721 if (theguid == srchval) 1722 return (nv); 1723 } 1724 break; 1725 1726 case DATA_TYPE_STRING: { 1727 char *srchval, *val; 1728 1729 verify(nvpair_value_string(pair, &srchval) == 0); 1730 if (nvlist_lookup_string(nv, srchkey, &val) != 0) 1731 break; 1732 1733 /* 1734 * Search for the requested value. Special cases: 1735 * 1736 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in 1737 * "s0" or "s0/old". The "s0" part is hidden from the user, 1738 * but included in the string, so this matches around it. 1739 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE). 1740 * 1741 * Otherwise, all other searches are simple string compares. 1742 */ 1743 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && 1744 ctd_check_path(val)) { 1745 uint64_t wholedisk = 0; 1746 1747 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 1748 &wholedisk); 1749 if (wholedisk) { 1750 int slen = strlen(srchval); 1751 int vlen = strlen(val); 1752 1753 if (slen != vlen - 2) 1754 break; 1755 1756 /* 1757 * make_leaf_vdev() should only set 1758 * wholedisk for ZPOOL_CONFIG_PATHs which 1759 * will include "/dev/dsk/", giving plenty of 1760 * room for the indices used next. 1761 */ 1762 ASSERT(vlen >= 6); 1763 1764 /* 1765 * strings identical except trailing "s0" 1766 */ 1767 if (strcmp(&val[vlen - 2], "s0") == 0 && 1768 strncmp(srchval, val, slen) == 0) 1769 return (nv); 1770 1771 /* 1772 * strings identical except trailing "s0/old" 1773 */ 1774 if (strcmp(&val[vlen - 6], "s0/old") == 0 && 1775 strcmp(&srchval[slen - 4], "/old") == 0 && 1776 strncmp(srchval, val, slen - 4) == 0) 1777 return (nv); 1778 1779 break; 1780 } 1781 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 1782 char *type, *idx, *end, *p; 1783 uint64_t id, vdev_id; 1784 1785 /* 1786 * Determine our vdev type, keeping in mind 1787 * that the srchval is composed of a type and 1788 * vdev id pair (i.e. mirror-4). 1789 */ 1790 if ((type = strdup(srchval)) == NULL) 1791 return (NULL); 1792 1793 if ((p = strrchr(type, '-')) == NULL) { 1794 free(type); 1795 break; 1796 } 1797 idx = p + 1; 1798 *p = '\0'; 1799 1800 /* 1801 * If the types don't match then keep looking. 1802 */ 1803 if (strncmp(val, type, strlen(val)) != 0) { 1804 free(type); 1805 break; 1806 } 1807 1808 verify(strncmp(type, VDEV_TYPE_RAIDZ, 1809 strlen(VDEV_TYPE_RAIDZ)) == 0 || 1810 strncmp(type, VDEV_TYPE_MIRROR, 1811 strlen(VDEV_TYPE_MIRROR)) == 0); 1812 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 1813 &id) == 0); 1814 1815 errno = 0; 1816 vdev_id = strtoull(idx, &end, 10); 1817 1818 free(type); 1819 if (errno != 0) 1820 return (NULL); 1821 1822 /* 1823 * Now verify that we have the correct vdev id. 1824 */ 1825 if (vdev_id == id) 1826 return (nv); 1827 } 1828 1829 /* 1830 * Common case 1831 */ 1832 if (strcmp(srchval, val) == 0) 1833 return (nv); 1834 break; 1835 } 1836 1837 default: 1838 break; 1839 } 1840 1841 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1842 &child, &children) != 0) 1843 return (NULL); 1844 1845 for (c = 0; c < children; c++) { 1846 if ((ret = vdev_to_nvlist_iter(child[c], search, 1847 avail_spare, l2cache, NULL)) != NULL) { 1848 /* 1849 * The 'is_log' value is only set for the toplevel 1850 * vdev, not the leaf vdevs. So we always lookup the 1851 * log device from the root of the vdev tree (where 1852 * 'log' is non-NULL). 1853 */ 1854 if (log != NULL && 1855 nvlist_lookup_uint64(child[c], 1856 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 1857 is_log) { 1858 *log = B_TRUE; 1859 } 1860 return (ret); 1861 } 1862 } 1863 1864 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 1865 &child, &children) == 0) { 1866 for (c = 0; c < children; c++) { 1867 if ((ret = vdev_to_nvlist_iter(child[c], search, 1868 avail_spare, l2cache, NULL)) != NULL) { 1869 *avail_spare = B_TRUE; 1870 return (ret); 1871 } 1872 } 1873 } 1874 1875 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 1876 &child, &children) == 0) { 1877 for (c = 0; c < children; c++) { 1878 if ((ret = vdev_to_nvlist_iter(child[c], search, 1879 avail_spare, l2cache, NULL)) != NULL) { 1880 *l2cache = B_TRUE; 1881 return (ret); 1882 } 1883 } 1884 } 1885 1886 return (NULL); 1887 } 1888 1889 /* 1890 * Given a physical path (minus the "/devices" prefix), find the 1891 * associated vdev. 1892 */ 1893 nvlist_t * 1894 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 1895 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 1896 { 1897 nvlist_t *search, *nvroot, *ret; 1898 1899 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 1900 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 1901 1902 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 1903 &nvroot) == 0); 1904 1905 *avail_spare = B_FALSE; 1906 *l2cache = B_FALSE; 1907 if (log != NULL) 1908 *log = B_FALSE; 1909 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 1910 nvlist_free(search); 1911 1912 return (ret); 1913 } 1914 1915 /* 1916 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 1917 */ 1918 boolean_t 1919 zpool_vdev_is_interior(const char *name) 1920 { 1921 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 1922 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 1923 return (B_TRUE); 1924 return (B_FALSE); 1925 } 1926 1927 nvlist_t * 1928 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 1929 boolean_t *l2cache, boolean_t *log) 1930 { 1931 char buf[MAXPATHLEN]; 1932 char *end; 1933 nvlist_t *nvroot, *search, *ret; 1934 uint64_t guid; 1935 1936 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 1937 1938 guid = strtoull(path, &end, 10); 1939 if (guid != 0 && *end == '\0') { 1940 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 1941 } else if (zpool_vdev_is_interior(path)) { 1942 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 1943 } else if (path[0] != '/') { 1944 (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path); 1945 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 1946 } else { 1947 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 1948 } 1949 1950 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 1951 &nvroot) == 0); 1952 1953 *avail_spare = B_FALSE; 1954 *l2cache = B_FALSE; 1955 if (log != NULL) 1956 *log = B_FALSE; 1957 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 1958 nvlist_free(search); 1959 1960 return (ret); 1961 } 1962 1963 static int 1964 vdev_online(nvlist_t *nv) 1965 { 1966 uint64_t ival; 1967 1968 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 1969 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 1970 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 1971 return (0); 1972 1973 return (1); 1974 } 1975 1976 /* 1977 * Helper function for zpool_get_physpaths(). 1978 */ 1979 static int 1980 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 1981 size_t *bytes_written) 1982 { 1983 size_t bytes_left, pos, rsz; 1984 char *tmppath; 1985 const char *format; 1986 1987 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 1988 &tmppath) != 0) 1989 return (EZFS_NODEVICE); 1990 1991 pos = *bytes_written; 1992 bytes_left = physpath_size - pos; 1993 format = (pos == 0) ? "%s" : " %s"; 1994 1995 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 1996 *bytes_written += rsz; 1997 1998 if (rsz >= bytes_left) { 1999 /* if physpath was not copied properly, clear it */ 2000 if (bytes_left != 0) { 2001 physpath[pos] = 0; 2002 } 2003 return (EZFS_NOSPC); 2004 } 2005 return (0); 2006 } 2007 2008 static int 2009 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2010 size_t *rsz, boolean_t is_spare) 2011 { 2012 char *type; 2013 int ret; 2014 2015 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2016 return (EZFS_INVALCONFIG); 2017 2018 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2019 /* 2020 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2021 * For a spare vdev, we only want to boot from the active 2022 * spare device. 2023 */ 2024 if (is_spare) { 2025 uint64_t spare = 0; 2026 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2027 &spare); 2028 if (!spare) 2029 return (EZFS_INVALCONFIG); 2030 } 2031 2032 if (vdev_online(nv)) { 2033 if ((ret = vdev_get_one_physpath(nv, physpath, 2034 phypath_size, rsz)) != 0) 2035 return (ret); 2036 } 2037 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2038 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2039 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2040 nvlist_t **child; 2041 uint_t count; 2042 int i, ret; 2043 2044 if (nvlist_lookup_nvlist_array(nv, 2045 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2046 return (EZFS_INVALCONFIG); 2047 2048 for (i = 0; i < count; i++) { 2049 ret = vdev_get_physpaths(child[i], physpath, 2050 phypath_size, rsz, is_spare); 2051 if (ret == EZFS_NOSPC) 2052 return (ret); 2053 } 2054 } 2055 2056 return (EZFS_POOL_INVALARG); 2057 } 2058 2059 /* 2060 * Get phys_path for a root pool config. 2061 * Return 0 on success; non-zero on failure. 2062 */ 2063 static int 2064 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2065 { 2066 size_t rsz; 2067 nvlist_t *vdev_root; 2068 nvlist_t **child; 2069 uint_t count; 2070 char *type; 2071 2072 rsz = 0; 2073 2074 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2075 &vdev_root) != 0) 2076 return (EZFS_INVALCONFIG); 2077 2078 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2079 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2080 &child, &count) != 0) 2081 return (EZFS_INVALCONFIG); 2082 2083 /* 2084 * root pool can not have EFI labeled disks and can only have 2085 * a single top-level vdev. 2086 */ 2087 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 || 2088 pool_uses_efi(vdev_root)) 2089 return (EZFS_POOL_INVALARG); 2090 2091 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2092 B_FALSE); 2093 2094 /* No online devices */ 2095 if (rsz == 0) 2096 return (EZFS_NODEVICE); 2097 2098 return (0); 2099 } 2100 2101 /* 2102 * Get phys_path for a root pool 2103 * Return 0 on success; non-zero on failure. 2104 */ 2105 int 2106 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2107 { 2108 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2109 phypath_size)); 2110 } 2111 2112 /* 2113 * If the device has being dynamically expanded then we need to relabel 2114 * the disk to use the new unallocated space. 2115 */ 2116 static int 2117 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2118 { 2119 char path[MAXPATHLEN]; 2120 char errbuf[1024]; 2121 int fd, error; 2122 int (*_efi_use_whole_disk)(int); 2123 2124 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2125 "efi_use_whole_disk")) == NULL) 2126 return (-1); 2127 2128 (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name); 2129 2130 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2131 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2132 "relabel '%s': unable to open device"), name); 2133 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2134 } 2135 2136 /* 2137 * It's possible that we might encounter an error if the device 2138 * does not have any unallocated space left. If so, we simply 2139 * ignore that error and continue on. 2140 */ 2141 error = _efi_use_whole_disk(fd); 2142 (void) close(fd); 2143 if (error && error != VT_ENOSPC) { 2144 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2145 "relabel '%s': unable to read disk capacity"), name); 2146 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2147 } 2148 return (0); 2149 } 2150 2151 /* 2152 * Bring the specified vdev online. The 'flags' parameter is a set of the 2153 * ZFS_ONLINE_* flags. 2154 */ 2155 int 2156 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2157 vdev_state_t *newstate) 2158 { 2159 zfs_cmd_t zc = { 0 }; 2160 char msg[1024]; 2161 nvlist_t *tgt; 2162 boolean_t avail_spare, l2cache, islog; 2163 libzfs_handle_t *hdl = zhp->zpool_hdl; 2164 2165 if (flags & ZFS_ONLINE_EXPAND) { 2166 (void) snprintf(msg, sizeof (msg), 2167 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2168 } else { 2169 (void) snprintf(msg, sizeof (msg), 2170 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2171 } 2172 2173 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2174 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2175 &islog)) == NULL) 2176 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2177 2178 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2179 2180 if (avail_spare) 2181 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2182 2183 if (flags & ZFS_ONLINE_EXPAND || 2184 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) { 2185 char *pathname = NULL; 2186 uint64_t wholedisk = 0; 2187 2188 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2189 &wholedisk); 2190 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, 2191 &pathname) == 0); 2192 2193 /* 2194 * XXX - L2ARC 1.0 devices can't support expansion. 2195 */ 2196 if (l2cache) { 2197 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2198 "cannot expand cache devices")); 2199 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2200 } 2201 2202 if (wholedisk) { 2203 pathname += strlen(DISK_ROOT) + 1; 2204 (void) zpool_relabel_disk(hdl, pathname); 2205 } 2206 } 2207 2208 zc.zc_cookie = VDEV_STATE_ONLINE; 2209 zc.zc_obj = flags; 2210 2211 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2212 if (errno == EINVAL) { 2213 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2214 "from this pool into a new one. Use '%s' " 2215 "instead"), "zpool detach"); 2216 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2217 } 2218 return (zpool_standard_error(hdl, errno, msg)); 2219 } 2220 2221 *newstate = zc.zc_cookie; 2222 return (0); 2223 } 2224 2225 /* 2226 * Take the specified vdev offline 2227 */ 2228 int 2229 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2230 { 2231 zfs_cmd_t zc = { 0 }; 2232 char msg[1024]; 2233 nvlist_t *tgt; 2234 boolean_t avail_spare, l2cache; 2235 libzfs_handle_t *hdl = zhp->zpool_hdl; 2236 2237 (void) snprintf(msg, sizeof (msg), 2238 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2239 2240 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2241 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2242 NULL)) == NULL) 2243 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2244 2245 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2246 2247 if (avail_spare) 2248 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2249 2250 zc.zc_cookie = VDEV_STATE_OFFLINE; 2251 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0; 2252 2253 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2254 return (0); 2255 2256 switch (errno) { 2257 case EBUSY: 2258 2259 /* 2260 * There are no other replicas of this device. 2261 */ 2262 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2263 2264 case EEXIST: 2265 /* 2266 * The log device has unplayed logs 2267 */ 2268 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2269 2270 default: 2271 return (zpool_standard_error(hdl, errno, msg)); 2272 } 2273 } 2274 2275 /* 2276 * Mark the given vdev faulted. 2277 */ 2278 int 2279 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2280 { 2281 zfs_cmd_t zc = { 0 }; 2282 char msg[1024]; 2283 libzfs_handle_t *hdl = zhp->zpool_hdl; 2284 2285 (void) snprintf(msg, sizeof (msg), 2286 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2287 2288 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2289 zc.zc_guid = guid; 2290 zc.zc_cookie = VDEV_STATE_FAULTED; 2291 zc.zc_obj = aux; 2292 2293 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2294 return (0); 2295 2296 switch (errno) { 2297 case EBUSY: 2298 2299 /* 2300 * There are no other replicas of this device. 2301 */ 2302 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2303 2304 default: 2305 return (zpool_standard_error(hdl, errno, msg)); 2306 } 2307 2308 } 2309 2310 /* 2311 * Mark the given vdev degraded. 2312 */ 2313 int 2314 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2315 { 2316 zfs_cmd_t zc = { 0 }; 2317 char msg[1024]; 2318 libzfs_handle_t *hdl = zhp->zpool_hdl; 2319 2320 (void) snprintf(msg, sizeof (msg), 2321 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2322 2323 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2324 zc.zc_guid = guid; 2325 zc.zc_cookie = VDEV_STATE_DEGRADED; 2326 zc.zc_obj = aux; 2327 2328 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2329 return (0); 2330 2331 return (zpool_standard_error(hdl, errno, msg)); 2332 } 2333 2334 /* 2335 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2336 * a hot spare. 2337 */ 2338 static boolean_t 2339 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2340 { 2341 nvlist_t **child; 2342 uint_t c, children; 2343 char *type; 2344 2345 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2346 &children) == 0) { 2347 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2348 &type) == 0); 2349 2350 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2351 children == 2 && child[which] == tgt) 2352 return (B_TRUE); 2353 2354 for (c = 0; c < children; c++) 2355 if (is_replacing_spare(child[c], tgt, which)) 2356 return (B_TRUE); 2357 } 2358 2359 return (B_FALSE); 2360 } 2361 2362 /* 2363 * Attach new_disk (fully described by nvroot) to old_disk. 2364 * If 'replacing' is specified, the new disk will replace the old one. 2365 */ 2366 int 2367 zpool_vdev_attach(zpool_handle_t *zhp, 2368 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2369 { 2370 zfs_cmd_t zc = { 0 }; 2371 char msg[1024]; 2372 int ret; 2373 nvlist_t *tgt; 2374 boolean_t avail_spare, l2cache, islog; 2375 uint64_t val; 2376 char *newname; 2377 nvlist_t **child; 2378 uint_t children; 2379 nvlist_t *config_root; 2380 libzfs_handle_t *hdl = zhp->zpool_hdl; 2381 boolean_t rootpool = pool_is_bootable(zhp); 2382 2383 if (replacing) 2384 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2385 "cannot replace %s with %s"), old_disk, new_disk); 2386 else 2387 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2388 "cannot attach %s to %s"), new_disk, old_disk); 2389 2390 /* 2391 * If this is a root pool, make sure that we're not attaching an 2392 * EFI labeled device. 2393 */ 2394 if (rootpool && pool_uses_efi(nvroot)) { 2395 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2396 "EFI labeled devices are not supported on root pools.")); 2397 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 2398 } 2399 2400 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2401 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2402 &islog)) == 0) 2403 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2404 2405 if (avail_spare) 2406 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2407 2408 if (l2cache) 2409 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2410 2411 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2412 zc.zc_cookie = replacing; 2413 2414 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2415 &child, &children) != 0 || children != 1) { 2416 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2417 "new device must be a single disk")); 2418 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2419 } 2420 2421 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2422 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2423 2424 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2425 return (-1); 2426 2427 /* 2428 * If the target is a hot spare that has been swapped in, we can only 2429 * replace it with another hot spare. 2430 */ 2431 if (replacing && 2432 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2433 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2434 NULL) == NULL || !avail_spare) && 2435 is_replacing_spare(config_root, tgt, 1)) { 2436 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2437 "can only be replaced by another hot spare")); 2438 free(newname); 2439 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2440 } 2441 2442 free(newname); 2443 2444 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2445 return (-1); 2446 2447 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2448 2449 zcmd_free_nvlists(&zc); 2450 2451 if (ret == 0) { 2452 if (rootpool) { 2453 /* 2454 * XXX need a better way to prevent user from 2455 * booting up a half-baked vdev. 2456 */ 2457 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2458 "sure to wait until resilver is done " 2459 "before rebooting.\n")); 2460 } 2461 return (0); 2462 } 2463 2464 switch (errno) { 2465 case ENOTSUP: 2466 /* 2467 * Can't attach to or replace this type of vdev. 2468 */ 2469 if (replacing) { 2470 uint64_t version = zpool_get_prop_int(zhp, 2471 ZPOOL_PROP_VERSION, NULL); 2472 2473 if (islog) 2474 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2475 "cannot replace a log with a spare")); 2476 else if (version >= SPA_VERSION_MULTI_REPLACE) 2477 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2478 "already in replacing/spare config; wait " 2479 "for completion or use 'zpool detach'")); 2480 else 2481 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2482 "cannot replace a replacing device")); 2483 } else { 2484 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2485 "can only attach to mirrors and top-level " 2486 "disks")); 2487 } 2488 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2489 break; 2490 2491 case EINVAL: 2492 /* 2493 * The new device must be a single disk. 2494 */ 2495 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2496 "new device must be a single disk")); 2497 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2498 break; 2499 2500 case EBUSY: 2501 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2502 new_disk); 2503 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2504 break; 2505 2506 case EOVERFLOW: 2507 /* 2508 * The new device is too small. 2509 */ 2510 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2511 "device is too small")); 2512 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2513 break; 2514 2515 case EDOM: 2516 /* 2517 * The new device has a different alignment requirement. 2518 */ 2519 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2520 "devices have different sector alignment")); 2521 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2522 break; 2523 2524 case ENAMETOOLONG: 2525 /* 2526 * The resulting top-level vdev spec won't fit in the label. 2527 */ 2528 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2529 break; 2530 2531 default: 2532 (void) zpool_standard_error(hdl, errno, msg); 2533 } 2534 2535 return (-1); 2536 } 2537 2538 /* 2539 * Detach the specified device. 2540 */ 2541 int 2542 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2543 { 2544 zfs_cmd_t zc = { 0 }; 2545 char msg[1024]; 2546 nvlist_t *tgt; 2547 boolean_t avail_spare, l2cache; 2548 libzfs_handle_t *hdl = zhp->zpool_hdl; 2549 2550 (void) snprintf(msg, sizeof (msg), 2551 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2552 2553 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2554 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2555 NULL)) == 0) 2556 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2557 2558 if (avail_spare) 2559 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2560 2561 if (l2cache) 2562 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2563 2564 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2565 2566 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2567 return (0); 2568 2569 switch (errno) { 2570 2571 case ENOTSUP: 2572 /* 2573 * Can't detach from this type of vdev. 2574 */ 2575 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2576 "applicable to mirror and replacing vdevs")); 2577 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2578 break; 2579 2580 case EBUSY: 2581 /* 2582 * There are no other replicas of this device. 2583 */ 2584 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2585 break; 2586 2587 default: 2588 (void) zpool_standard_error(hdl, errno, msg); 2589 } 2590 2591 return (-1); 2592 } 2593 2594 /* 2595 * Find a mirror vdev in the source nvlist. 2596 * 2597 * The mchild array contains a list of disks in one of the top-level mirrors 2598 * of the source pool. The schild array contains a list of disks that the 2599 * user specified on the command line. We loop over the mchild array to 2600 * see if any entry in the schild array matches. 2601 * 2602 * If a disk in the mchild array is found in the schild array, we return 2603 * the index of that entry. Otherwise we return -1. 2604 */ 2605 static int 2606 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2607 nvlist_t **schild, uint_t schildren) 2608 { 2609 uint_t mc; 2610 2611 for (mc = 0; mc < mchildren; mc++) { 2612 uint_t sc; 2613 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2614 mchild[mc], B_FALSE); 2615 2616 for (sc = 0; sc < schildren; sc++) { 2617 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2618 schild[sc], B_FALSE); 2619 boolean_t result = (strcmp(mpath, spath) == 0); 2620 2621 free(spath); 2622 if (result) { 2623 free(mpath); 2624 return (mc); 2625 } 2626 } 2627 2628 free(mpath); 2629 } 2630 2631 return (-1); 2632 } 2633 2634 /* 2635 * Split a mirror pool. If newroot points to null, then a new nvlist 2636 * is generated and it is the responsibility of the caller to free it. 2637 */ 2638 int 2639 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2640 nvlist_t *props, splitflags_t flags) 2641 { 2642 zfs_cmd_t zc = { 0 }; 2643 char msg[1024]; 2644 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2645 nvlist_t **varray = NULL, *zc_props = NULL; 2646 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2647 libzfs_handle_t *hdl = zhp->zpool_hdl; 2648 uint64_t vers; 2649 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2650 int retval = 0; 2651 2652 (void) snprintf(msg, sizeof (msg), 2653 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2654 2655 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2656 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2657 2658 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2659 (void) fprintf(stderr, gettext("Internal error: unable to " 2660 "retrieve pool configuration\n")); 2661 return (-1); 2662 } 2663 2664 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2665 == 0); 2666 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2667 2668 if (props) { 2669 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2670 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2671 props, vers, flags, msg)) == NULL) 2672 return (-1); 2673 } 2674 2675 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2676 &children) != 0) { 2677 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2678 "Source pool is missing vdev tree")); 2679 if (zc_props) 2680 nvlist_free(zc_props); 2681 return (-1); 2682 } 2683 2684 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2685 vcount = 0; 2686 2687 if (*newroot == NULL || 2688 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2689 &newchild, &newchildren) != 0) 2690 newchildren = 0; 2691 2692 for (c = 0; c < children; c++) { 2693 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2694 char *type; 2695 nvlist_t **mchild, *vdev; 2696 uint_t mchildren; 2697 int entry; 2698 2699 /* 2700 * Unlike cache & spares, slogs are stored in the 2701 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2702 */ 2703 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2704 &is_log); 2705 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2706 &is_hole); 2707 if (is_log || is_hole) { 2708 /* 2709 * Create a hole vdev and put it in the config. 2710 */ 2711 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2712 goto out; 2713 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2714 VDEV_TYPE_HOLE) != 0) 2715 goto out; 2716 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2717 1) != 0) 2718 goto out; 2719 if (lastlog == 0) 2720 lastlog = vcount; 2721 varray[vcount++] = vdev; 2722 continue; 2723 } 2724 lastlog = 0; 2725 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2726 == 0); 2727 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2728 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2729 "Source pool must be composed only of mirrors\n")); 2730 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2731 goto out; 2732 } 2733 2734 verify(nvlist_lookup_nvlist_array(child[c], 2735 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2736 2737 /* find or add an entry for this top-level vdev */ 2738 if (newchildren > 0 && 2739 (entry = find_vdev_entry(zhp, mchild, mchildren, 2740 newchild, newchildren)) >= 0) { 2741 /* We found a disk that the user specified. */ 2742 vdev = mchild[entry]; 2743 ++found; 2744 } else { 2745 /* User didn't specify a disk for this vdev. */ 2746 vdev = mchild[mchildren - 1]; 2747 } 2748 2749 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 2750 goto out; 2751 } 2752 2753 /* did we find every disk the user specified? */ 2754 if (found != newchildren) { 2755 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 2756 "include at most one disk from each mirror")); 2757 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2758 goto out; 2759 } 2760 2761 /* Prepare the nvlist for populating. */ 2762 if (*newroot == NULL) { 2763 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 2764 goto out; 2765 freelist = B_TRUE; 2766 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 2767 VDEV_TYPE_ROOT) != 0) 2768 goto out; 2769 } else { 2770 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 2771 } 2772 2773 /* Add all the children we found */ 2774 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 2775 lastlog == 0 ? vcount : lastlog) != 0) 2776 goto out; 2777 2778 /* 2779 * If we're just doing a dry run, exit now with success. 2780 */ 2781 if (flags.dryrun) { 2782 memory_err = B_FALSE; 2783 freelist = B_FALSE; 2784 goto out; 2785 } 2786 2787 /* now build up the config list & call the ioctl */ 2788 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 2789 goto out; 2790 2791 if (nvlist_add_nvlist(newconfig, 2792 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 2793 nvlist_add_string(newconfig, 2794 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 2795 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 2796 goto out; 2797 2798 /* 2799 * The new pool is automatically part of the namespace unless we 2800 * explicitly export it. 2801 */ 2802 if (!flags.import) 2803 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 2804 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2805 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 2806 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 2807 goto out; 2808 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 2809 goto out; 2810 2811 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 2812 retval = zpool_standard_error(hdl, errno, msg); 2813 goto out; 2814 } 2815 2816 freelist = B_FALSE; 2817 memory_err = B_FALSE; 2818 2819 out: 2820 if (varray != NULL) { 2821 int v; 2822 2823 for (v = 0; v < vcount; v++) 2824 nvlist_free(varray[v]); 2825 free(varray); 2826 } 2827 zcmd_free_nvlists(&zc); 2828 if (zc_props) 2829 nvlist_free(zc_props); 2830 if (newconfig) 2831 nvlist_free(newconfig); 2832 if (freelist) { 2833 nvlist_free(*newroot); 2834 *newroot = NULL; 2835 } 2836 2837 if (retval != 0) 2838 return (retval); 2839 2840 if (memory_err) 2841 return (no_memory(hdl)); 2842 2843 return (0); 2844 } 2845 2846 /* 2847 * Remove the given device. Currently, this is supported only for hot spares 2848 * and level 2 cache devices. 2849 */ 2850 int 2851 zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 2852 { 2853 zfs_cmd_t zc = { 0 }; 2854 char msg[1024]; 2855 nvlist_t *tgt; 2856 boolean_t avail_spare, l2cache, islog; 2857 libzfs_handle_t *hdl = zhp->zpool_hdl; 2858 uint64_t version; 2859 2860 (void) snprintf(msg, sizeof (msg), 2861 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 2862 2863 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2864 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2865 &islog)) == 0) 2866 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2867 /* 2868 * XXX - this should just go away. 2869 */ 2870 if (!avail_spare && !l2cache && !islog) { 2871 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2872 "only inactive hot spares, cache, top-level, " 2873 "or log devices can be removed")); 2874 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2875 } 2876 2877 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 2878 if (islog && version < SPA_VERSION_HOLES) { 2879 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2880 "pool must be upgrade to support log removal")); 2881 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 2882 } 2883 2884 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2885 2886 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 2887 return (0); 2888 2889 return (zpool_standard_error(hdl, errno, msg)); 2890 } 2891 2892 /* 2893 * Clear the errors for the pool, or the particular device if specified. 2894 */ 2895 int 2896 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 2897 { 2898 zfs_cmd_t zc = { 0 }; 2899 char msg[1024]; 2900 nvlist_t *tgt; 2901 zpool_rewind_policy_t policy; 2902 boolean_t avail_spare, l2cache; 2903 libzfs_handle_t *hdl = zhp->zpool_hdl; 2904 nvlist_t *nvi = NULL; 2905 int error; 2906 2907 if (path) 2908 (void) snprintf(msg, sizeof (msg), 2909 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 2910 path); 2911 else 2912 (void) snprintf(msg, sizeof (msg), 2913 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 2914 zhp->zpool_name); 2915 2916 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2917 if (path) { 2918 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 2919 &l2cache, NULL)) == 0) 2920 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2921 2922 /* 2923 * Don't allow error clearing for hot spares. Do allow 2924 * error clearing for l2cache devices. 2925 */ 2926 if (avail_spare) 2927 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2928 2929 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 2930 &zc.zc_guid) == 0); 2931 } 2932 2933 zpool_get_rewind_policy(rewindnvl, &policy); 2934 zc.zc_cookie = policy.zrp_request; 2935 2936 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 2937 return (-1); 2938 2939 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 2940 return (-1); 2941 2942 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 2943 errno == ENOMEM) { 2944 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 2945 zcmd_free_nvlists(&zc); 2946 return (-1); 2947 } 2948 } 2949 2950 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) && 2951 errno != EPERM && errno != EACCES)) { 2952 if (policy.zrp_request & 2953 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 2954 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 2955 zpool_rewind_exclaim(hdl, zc.zc_name, 2956 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), 2957 nvi); 2958 nvlist_free(nvi); 2959 } 2960 zcmd_free_nvlists(&zc); 2961 return (0); 2962 } 2963 2964 zcmd_free_nvlists(&zc); 2965 return (zpool_standard_error(hdl, errno, msg)); 2966 } 2967 2968 /* 2969 * Similar to zpool_clear(), but takes a GUID (used by fmd). 2970 */ 2971 int 2972 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 2973 { 2974 zfs_cmd_t zc = { 0 }; 2975 char msg[1024]; 2976 libzfs_handle_t *hdl = zhp->zpool_hdl; 2977 2978 (void) snprintf(msg, sizeof (msg), 2979 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 2980 guid); 2981 2982 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2983 zc.zc_guid = guid; 2984 zc.zc_cookie = ZPOOL_NO_REWIND; 2985 2986 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 2987 return (0); 2988 2989 return (zpool_standard_error(hdl, errno, msg)); 2990 } 2991 2992 /* 2993 * Change the GUID for a pool. 2994 */ 2995 int 2996 zpool_reguid(zpool_handle_t *zhp) 2997 { 2998 char msg[1024]; 2999 libzfs_handle_t *hdl = zhp->zpool_hdl; 3000 zfs_cmd_t zc = { 0 }; 3001 3002 (void) snprintf(msg, sizeof (msg), 3003 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3004 3005 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3006 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3007 return (0); 3008 3009 return (zpool_standard_error(hdl, errno, msg)); 3010 } 3011 3012 /* 3013 * Convert from a devid string to a path. 3014 */ 3015 static char * 3016 devid_to_path(char *devid_str) 3017 { 3018 ddi_devid_t devid; 3019 char *minor; 3020 char *path; 3021 devid_nmlist_t *list = NULL; 3022 int ret; 3023 3024 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3025 return (NULL); 3026 3027 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3028 3029 devid_str_free(minor); 3030 devid_free(devid); 3031 3032 if (ret != 0) 3033 return (NULL); 3034 3035 if ((path = strdup(list[0].devname)) == NULL) 3036 return (NULL); 3037 3038 devid_free_nmlist(list); 3039 3040 return (path); 3041 } 3042 3043 /* 3044 * Convert from a path to a devid string. 3045 */ 3046 static char * 3047 path_to_devid(const char *path) 3048 { 3049 int fd; 3050 ddi_devid_t devid; 3051 char *minor, *ret; 3052 3053 if ((fd = open(path, O_RDONLY)) < 0) 3054 return (NULL); 3055 3056 minor = NULL; 3057 ret = NULL; 3058 if (devid_get(fd, &devid) == 0) { 3059 if (devid_get_minor_name(fd, &minor) == 0) 3060 ret = devid_str_encode(devid, minor); 3061 if (minor != NULL) 3062 devid_str_free(minor); 3063 devid_free(devid); 3064 } 3065 (void) close(fd); 3066 3067 return (ret); 3068 } 3069 3070 /* 3071 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3072 * ignore any failure here, since a common case is for an unprivileged user to 3073 * type 'zpool status', and we'll display the correct information anyway. 3074 */ 3075 static void 3076 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3077 { 3078 zfs_cmd_t zc = { 0 }; 3079 3080 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3081 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3082 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3083 &zc.zc_guid) == 0); 3084 3085 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3086 } 3087 3088 /* 3089 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3090 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3091 * We also check if this is a whole disk, in which case we strip off the 3092 * trailing 's0' slice name. 3093 * 3094 * This routine is also responsible for identifying when disks have been 3095 * reconfigured in a new location. The kernel will have opened the device by 3096 * devid, but the path will still refer to the old location. To catch this, we 3097 * first do a path -> devid translation (which is fast for the common case). If 3098 * the devid matches, we're done. If not, we do a reverse devid -> path 3099 * translation and issue the appropriate ioctl() to update the path of the vdev. 3100 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3101 * of these checks. 3102 */ 3103 char * 3104 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3105 boolean_t verbose) 3106 { 3107 char *path, *devid; 3108 uint64_t value; 3109 char buf[64]; 3110 vdev_stat_t *vs; 3111 uint_t vsc; 3112 3113 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 3114 &value) == 0) { 3115 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3116 &value) == 0); 3117 (void) snprintf(buf, sizeof (buf), "%llu", 3118 (u_longlong_t)value); 3119 path = buf; 3120 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 3121 3122 /* 3123 * If the device is dead (faulted, offline, etc) then don't 3124 * bother opening it. Otherwise we may be forcing the user to 3125 * open a misbehaving device, which can have undesirable 3126 * effects. 3127 */ 3128 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3129 (uint64_t **)&vs, &vsc) != 0 || 3130 vs->vs_state >= VDEV_STATE_DEGRADED) && 3131 zhp != NULL && 3132 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3133 /* 3134 * Determine if the current path is correct. 3135 */ 3136 char *newdevid = path_to_devid(path); 3137 3138 if (newdevid == NULL || 3139 strcmp(devid, newdevid) != 0) { 3140 char *newpath; 3141 3142 if ((newpath = devid_to_path(devid)) != NULL) { 3143 /* 3144 * Update the path appropriately. 3145 */ 3146 set_path(zhp, nv, newpath); 3147 if (nvlist_add_string(nv, 3148 ZPOOL_CONFIG_PATH, newpath) == 0) 3149 verify(nvlist_lookup_string(nv, 3150 ZPOOL_CONFIG_PATH, 3151 &path) == 0); 3152 free(newpath); 3153 } 3154 } 3155 3156 if (newdevid) 3157 devid_str_free(newdevid); 3158 } 3159 3160 if (strncmp(path, "/dev/dsk/", 9) == 0) 3161 path += 9; 3162 3163 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3164 &value) == 0 && value) { 3165 int pathlen = strlen(path); 3166 char *tmp = zfs_strdup(hdl, path); 3167 3168 /* 3169 * If it starts with c#, and ends with "s0", chop 3170 * the "s0" off, or if it ends with "s0/old", remove 3171 * the "s0" from the middle. 3172 */ 3173 if (CTD_CHECK(tmp)) { 3174 if (strcmp(&tmp[pathlen - 2], "s0") == 0) { 3175 tmp[pathlen - 2] = '\0'; 3176 } else if (pathlen > 6 && 3177 strcmp(&tmp[pathlen - 6], "s0/old") == 0) { 3178 (void) strcpy(&tmp[pathlen - 6], 3179 "/old"); 3180 } 3181 } 3182 return (tmp); 3183 } 3184 } else { 3185 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3186 3187 /* 3188 * If it's a raidz device, we need to stick in the parity level. 3189 */ 3190 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3191 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3192 &value) == 0); 3193 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3194 (u_longlong_t)value); 3195 path = buf; 3196 } 3197 3198 /* 3199 * We identify each top-level vdev by using a <type-id> 3200 * naming convention. 3201 */ 3202 if (verbose) { 3203 uint64_t id; 3204 3205 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3206 &id) == 0); 3207 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3208 (u_longlong_t)id); 3209 path = buf; 3210 } 3211 } 3212 3213 return (zfs_strdup(hdl, path)); 3214 } 3215 3216 static int 3217 zbookmark_compare(const void *a, const void *b) 3218 { 3219 return (memcmp(a, b, sizeof (zbookmark_t))); 3220 } 3221 3222 /* 3223 * Retrieve the persistent error log, uniquify the members, and return to the 3224 * caller. 3225 */ 3226 int 3227 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3228 { 3229 zfs_cmd_t zc = { 0 }; 3230 uint64_t count; 3231 zbookmark_t *zb = NULL; 3232 int i; 3233 3234 /* 3235 * Retrieve the raw error list from the kernel. If the number of errors 3236 * has increased, allocate more space and continue until we get the 3237 * entire list. 3238 */ 3239 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3240 &count) == 0); 3241 if (count == 0) 3242 return (0); 3243 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3244 count * sizeof (zbookmark_t))) == (uintptr_t)NULL) 3245 return (-1); 3246 zc.zc_nvlist_dst_size = count; 3247 (void) strcpy(zc.zc_name, zhp->zpool_name); 3248 for (;;) { 3249 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3250 &zc) != 0) { 3251 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3252 if (errno == ENOMEM) { 3253 count = zc.zc_nvlist_dst_size; 3254 if ((zc.zc_nvlist_dst = (uintptr_t) 3255 zfs_alloc(zhp->zpool_hdl, count * 3256 sizeof (zbookmark_t))) == (uintptr_t)NULL) 3257 return (-1); 3258 } else { 3259 return (-1); 3260 } 3261 } else { 3262 break; 3263 } 3264 } 3265 3266 /* 3267 * Sort the resulting bookmarks. This is a little confusing due to the 3268 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last 3269 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks 3270 * _not_ copied as part of the process. So we point the start of our 3271 * array appropriate and decrement the total number of elements. 3272 */ 3273 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) + 3274 zc.zc_nvlist_dst_size; 3275 count -= zc.zc_nvlist_dst_size; 3276 3277 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare); 3278 3279 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 3280 3281 /* 3282 * Fill in the nverrlistp with nvlist's of dataset and object numbers. 3283 */ 3284 for (i = 0; i < count; i++) { 3285 nvlist_t *nv; 3286 3287 /* ignoring zb_blkid and zb_level for now */ 3288 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3289 zb[i-1].zb_object == zb[i].zb_object) 3290 continue; 3291 3292 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3293 goto nomem; 3294 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3295 zb[i].zb_objset) != 0) { 3296 nvlist_free(nv); 3297 goto nomem; 3298 } 3299 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3300 zb[i].zb_object) != 0) { 3301 nvlist_free(nv); 3302 goto nomem; 3303 } 3304 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3305 nvlist_free(nv); 3306 goto nomem; 3307 } 3308 nvlist_free(nv); 3309 } 3310 3311 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3312 return (0); 3313 3314 nomem: 3315 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3316 return (no_memory(zhp->zpool_hdl)); 3317 } 3318 3319 /* 3320 * Upgrade a ZFS pool to the latest on-disk version. 3321 */ 3322 int 3323 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3324 { 3325 zfs_cmd_t zc = { 0 }; 3326 libzfs_handle_t *hdl = zhp->zpool_hdl; 3327 3328 (void) strcpy(zc.zc_name, zhp->zpool_name); 3329 zc.zc_cookie = new_version; 3330 3331 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3332 return (zpool_standard_error_fmt(hdl, errno, 3333 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3334 zhp->zpool_name)); 3335 return (0); 3336 } 3337 3338 void 3339 zpool_set_history_str(const char *subcommand, int argc, char **argv, 3340 char *history_str) 3341 { 3342 int i; 3343 3344 (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN); 3345 for (i = 1; i < argc; i++) { 3346 if (strlen(history_str) + 1 + strlen(argv[i]) > 3347 HIS_MAX_RECORD_LEN) 3348 break; 3349 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN); 3350 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN); 3351 } 3352 } 3353 3354 /* 3355 * Stage command history for logging. 3356 */ 3357 int 3358 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str) 3359 { 3360 if (history_str == NULL) 3361 return (EINVAL); 3362 3363 if (strlen(history_str) > HIS_MAX_RECORD_LEN) 3364 return (EINVAL); 3365 3366 if (hdl->libzfs_log_str != NULL) 3367 free(hdl->libzfs_log_str); 3368 3369 if ((hdl->libzfs_log_str = strdup(history_str)) == NULL) 3370 return (no_memory(hdl)); 3371 3372 return (0); 3373 } 3374 3375 /* 3376 * Perform ioctl to get some command history of a pool. 3377 * 3378 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 3379 * logical offset of the history buffer to start reading from. 3380 * 3381 * Upon return, 'off' is the next logical offset to read from and 3382 * 'len' is the actual amount of bytes read into 'buf'. 3383 */ 3384 static int 3385 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3386 { 3387 zfs_cmd_t zc = { 0 }; 3388 libzfs_handle_t *hdl = zhp->zpool_hdl; 3389 3390 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3391 3392 zc.zc_history = (uint64_t)(uintptr_t)buf; 3393 zc.zc_history_len = *len; 3394 zc.zc_history_offset = *off; 3395 3396 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3397 switch (errno) { 3398 case EPERM: 3399 return (zfs_error_fmt(hdl, EZFS_PERM, 3400 dgettext(TEXT_DOMAIN, 3401 "cannot show history for pool '%s'"), 3402 zhp->zpool_name)); 3403 case ENOENT: 3404 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3405 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3406 "'%s'"), zhp->zpool_name)); 3407 case ENOTSUP: 3408 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3409 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3410 "'%s', pool must be upgraded"), zhp->zpool_name)); 3411 default: 3412 return (zpool_standard_error_fmt(hdl, errno, 3413 dgettext(TEXT_DOMAIN, 3414 "cannot get history for '%s'"), zhp->zpool_name)); 3415 } 3416 } 3417 3418 *len = zc.zc_history_len; 3419 *off = zc.zc_history_offset; 3420 3421 return (0); 3422 } 3423 3424 /* 3425 * Process the buffer of nvlists, unpacking and storing each nvlist record 3426 * into 'records'. 'leftover' is set to the number of bytes that weren't 3427 * processed as there wasn't a complete record. 3428 */ 3429 int 3430 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3431 nvlist_t ***records, uint_t *numrecords) 3432 { 3433 uint64_t reclen; 3434 nvlist_t *nv; 3435 int i; 3436 3437 while (bytes_read > sizeof (reclen)) { 3438 3439 /* get length of packed record (stored as little endian) */ 3440 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3441 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3442 3443 if (bytes_read < sizeof (reclen) + reclen) 3444 break; 3445 3446 /* unpack record */ 3447 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3448 return (ENOMEM); 3449 bytes_read -= sizeof (reclen) + reclen; 3450 buf += sizeof (reclen) + reclen; 3451 3452 /* add record to nvlist array */ 3453 (*numrecords)++; 3454 if (ISP2(*numrecords + 1)) { 3455 *records = realloc(*records, 3456 *numrecords * 2 * sizeof (nvlist_t *)); 3457 } 3458 (*records)[*numrecords - 1] = nv; 3459 } 3460 3461 *leftover = bytes_read; 3462 return (0); 3463 } 3464 3465 #define HIS_BUF_LEN (128*1024) 3466 3467 /* 3468 * Retrieve the command history of a pool. 3469 */ 3470 int 3471 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3472 { 3473 char buf[HIS_BUF_LEN]; 3474 uint64_t off = 0; 3475 nvlist_t **records = NULL; 3476 uint_t numrecords = 0; 3477 int err, i; 3478 3479 do { 3480 uint64_t bytes_read = sizeof (buf); 3481 uint64_t leftover; 3482 3483 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3484 break; 3485 3486 /* if nothing else was read in, we're at EOF, just return */ 3487 if (!bytes_read) 3488 break; 3489 3490 if ((err = zpool_history_unpack(buf, bytes_read, 3491 &leftover, &records, &numrecords)) != 0) 3492 break; 3493 off -= leftover; 3494 3495 /* CONSTCOND */ 3496 } while (1); 3497 3498 if (!err) { 3499 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3500 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3501 records, numrecords) == 0); 3502 } 3503 for (i = 0; i < numrecords; i++) 3504 nvlist_free(records[i]); 3505 free(records); 3506 3507 return (err); 3508 } 3509 3510 void 3511 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3512 char *pathname, size_t len) 3513 { 3514 zfs_cmd_t zc = { 0 }; 3515 boolean_t mounted = B_FALSE; 3516 char *mntpnt = NULL; 3517 char dsname[MAXNAMELEN]; 3518 3519 if (dsobj == 0) { 3520 /* special case for the MOS */ 3521 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3522 return; 3523 } 3524 3525 /* get the dataset's name */ 3526 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3527 zc.zc_obj = dsobj; 3528 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3529 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3530 /* just write out a path of two object numbers */ 3531 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3532 dsobj, obj); 3533 return; 3534 } 3535 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3536 3537 /* find out if the dataset is mounted */ 3538 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3539 3540 /* get the corrupted object's path */ 3541 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3542 zc.zc_obj = obj; 3543 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3544 &zc) == 0) { 3545 if (mounted) { 3546 (void) snprintf(pathname, len, "%s%s", mntpnt, 3547 zc.zc_value); 3548 } else { 3549 (void) snprintf(pathname, len, "%s:%s", 3550 dsname, zc.zc_value); 3551 } 3552 } else { 3553 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3554 } 3555 free(mntpnt); 3556 } 3557 3558 /* 3559 * Read the EFI label from the config, if a label does not exist then 3560 * pass back the error to the caller. If the caller has passed a non-NULL 3561 * diskaddr argument then we set it to the starting address of the EFI 3562 * partition. 3563 */ 3564 static int 3565 read_efi_label(nvlist_t *config, diskaddr_t *sb) 3566 { 3567 char *path; 3568 int fd; 3569 char diskname[MAXPATHLEN]; 3570 int err = -1; 3571 3572 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3573 return (err); 3574 3575 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, 3576 strrchr(path, '/')); 3577 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3578 struct dk_gpt *vtoc; 3579 3580 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3581 if (sb != NULL) 3582 *sb = vtoc->efi_parts[0].p_start; 3583 efi_free(vtoc); 3584 } 3585 (void) close(fd); 3586 } 3587 return (err); 3588 } 3589 3590 /* 3591 * determine where a partition starts on a disk in the current 3592 * configuration 3593 */ 3594 static diskaddr_t 3595 find_start_block(nvlist_t *config) 3596 { 3597 nvlist_t **child; 3598 uint_t c, children; 3599 diskaddr_t sb = MAXOFFSET_T; 3600 uint64_t wholedisk; 3601 3602 if (nvlist_lookup_nvlist_array(config, 3603 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3604 if (nvlist_lookup_uint64(config, 3605 ZPOOL_CONFIG_WHOLE_DISK, 3606 &wholedisk) != 0 || !wholedisk) { 3607 return (MAXOFFSET_T); 3608 } 3609 if (read_efi_label(config, &sb) < 0) 3610 sb = MAXOFFSET_T; 3611 return (sb); 3612 } 3613 3614 for (c = 0; c < children; c++) { 3615 sb = find_start_block(child[c]); 3616 if (sb != MAXOFFSET_T) { 3617 return (sb); 3618 } 3619 } 3620 return (MAXOFFSET_T); 3621 } 3622 3623 /* 3624 * Label an individual disk. The name provided is the short name, 3625 * stripped of any leading /dev path. 3626 */ 3627 int 3628 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name) 3629 { 3630 char path[MAXPATHLEN]; 3631 struct dk_gpt *vtoc; 3632 int fd; 3633 size_t resv = EFI_MIN_RESV_SIZE; 3634 uint64_t slice_size; 3635 diskaddr_t start_block; 3636 char errbuf[1024]; 3637 3638 /* prepare an error message just in case */ 3639 (void) snprintf(errbuf, sizeof (errbuf), 3640 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3641 3642 if (zhp) { 3643 nvlist_t *nvroot; 3644 3645 if (pool_is_bootable(zhp)) { 3646 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3647 "EFI labeled devices are not supported on root " 3648 "pools.")); 3649 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3650 } 3651 3652 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3653 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3654 3655 if (zhp->zpool_start_block == 0) 3656 start_block = find_start_block(nvroot); 3657 else 3658 start_block = zhp->zpool_start_block; 3659 zhp->zpool_start_block = start_block; 3660 } else { 3661 /* new pool */ 3662 start_block = NEW_START_BLOCK; 3663 } 3664 3665 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, 3666 BACKUP_SLICE); 3667 3668 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3669 /* 3670 * This shouldn't happen. We've long since verified that this 3671 * is a valid device. 3672 */ 3673 zfs_error_aux(hdl, 3674 dgettext(TEXT_DOMAIN, "unable to open device")); 3675 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 3676 } 3677 3678 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 3679 /* 3680 * The only way this can fail is if we run out of memory, or we 3681 * were unable to read the disk's capacity 3682 */ 3683 if (errno == ENOMEM) 3684 (void) no_memory(hdl); 3685 3686 (void) close(fd); 3687 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3688 "unable to read disk capacity"), name); 3689 3690 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 3691 } 3692 3693 slice_size = vtoc->efi_last_u_lba + 1; 3694 slice_size -= EFI_MIN_RESV_SIZE; 3695 if (start_block == MAXOFFSET_T) 3696 start_block = NEW_START_BLOCK; 3697 slice_size -= start_block; 3698 3699 vtoc->efi_parts[0].p_start = start_block; 3700 vtoc->efi_parts[0].p_size = slice_size; 3701 3702 /* 3703 * Why we use V_USR: V_BACKUP confuses users, and is considered 3704 * disposable by some EFI utilities (since EFI doesn't have a backup 3705 * slice). V_UNASSIGNED is supposed to be used only for zero size 3706 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 3707 * etc. were all pretty specific. V_USR is as close to reality as we 3708 * can get, in the absence of V_OTHER. 3709 */ 3710 vtoc->efi_parts[0].p_tag = V_USR; 3711 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 3712 3713 vtoc->efi_parts[8].p_start = slice_size + start_block; 3714 vtoc->efi_parts[8].p_size = resv; 3715 vtoc->efi_parts[8].p_tag = V_RESERVED; 3716 3717 if (efi_write(fd, vtoc) != 0) { 3718 /* 3719 * Some block drivers (like pcata) may not support EFI 3720 * GPT labels. Print out a helpful error message dir- 3721 * ecting the user to manually label the disk and give 3722 * a specific slice. 3723 */ 3724 (void) close(fd); 3725 efi_free(vtoc); 3726 3727 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3728 "try using fdisk(1M) and then provide a specific slice")); 3729 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 3730 } 3731 3732 (void) close(fd); 3733 efi_free(vtoc); 3734 return (0); 3735 } 3736 3737 static boolean_t 3738 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 3739 { 3740 char *type; 3741 nvlist_t **child; 3742 uint_t children, c; 3743 3744 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 3745 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 || 3746 strcmp(type, VDEV_TYPE_FILE) == 0 || 3747 strcmp(type, VDEV_TYPE_LOG) == 0 || 3748 strcmp(type, VDEV_TYPE_HOLE) == 0 || 3749 strcmp(type, VDEV_TYPE_MISSING) == 0) { 3750 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3751 "vdev type '%s' is not supported"), type); 3752 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 3753 return (B_FALSE); 3754 } 3755 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 3756 &child, &children) == 0) { 3757 for (c = 0; c < children; c++) { 3758 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 3759 return (B_FALSE); 3760 } 3761 } 3762 return (B_TRUE); 3763 } 3764 3765 /* 3766 * check if this zvol is allowable for use as a dump device; zero if 3767 * it is, > 0 if it isn't, < 0 if it isn't a zvol 3768 */ 3769 int 3770 zvol_check_dump_config(char *arg) 3771 { 3772 zpool_handle_t *zhp = NULL; 3773 nvlist_t *config, *nvroot; 3774 char *p, *volname; 3775 nvlist_t **top; 3776 uint_t toplevels; 3777 libzfs_handle_t *hdl; 3778 char errbuf[1024]; 3779 char poolname[ZPOOL_MAXNAMELEN]; 3780 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 3781 int ret = 1; 3782 3783 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 3784 return (-1); 3785 } 3786 3787 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 3788 "dump is not supported on device '%s'"), arg); 3789 3790 if ((hdl = libzfs_init()) == NULL) 3791 return (1); 3792 libzfs_print_on_error(hdl, B_TRUE); 3793 3794 volname = arg + pathlen; 3795 3796 /* check the configuration of the pool */ 3797 if ((p = strchr(volname, '/')) == NULL) { 3798 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3799 "malformed dataset name")); 3800 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 3801 return (1); 3802 } else if (p - volname >= ZFS_MAXNAMELEN) { 3803 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3804 "dataset name is too long")); 3805 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 3806 return (1); 3807 } else { 3808 (void) strncpy(poolname, volname, p - volname); 3809 poolname[p - volname] = '\0'; 3810 } 3811 3812 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 3813 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3814 "could not open pool '%s'"), poolname); 3815 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 3816 goto out; 3817 } 3818 config = zpool_get_config(zhp, NULL); 3819 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3820 &nvroot) != 0) { 3821 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3822 "could not obtain vdev configuration for '%s'"), poolname); 3823 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 3824 goto out; 3825 } 3826 3827 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3828 &top, &toplevels) == 0); 3829 if (toplevels != 1) { 3830 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3831 "'%s' has multiple top level vdevs"), poolname); 3832 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf); 3833 goto out; 3834 } 3835 3836 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 3837 goto out; 3838 } 3839 ret = 0; 3840 3841 out: 3842 if (zhp) 3843 zpool_close(zhp); 3844 libzfs_fini(hdl); 3845 return (ret); 3846 } 3847