1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2020 by Delphix. All rights reserved. 25 * Copyright 2019 Joyent, Inc. 26 * Copyright 2016 Nexenta Systems, Inc. 27 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com> 28 * Copyright (c) 2017 Datto Inc. 29 * Copyright (c) 2017, Intel Corporation. 30 */ 31 32 #include <ctype.h> 33 #include <errno.h> 34 #include <devid.h> 35 #include <fcntl.h> 36 #include <libintl.h> 37 #include <stdio.h> 38 #include <stdlib.h> 39 #include <strings.h> 40 #include <unistd.h> 41 #include <libgen.h> 42 #include <sys/efi_partition.h> 43 #include <sys/vtoc.h> 44 #include <sys/zfs_ioctl.h> 45 #include <dlfcn.h> 46 #include <libzutil.h> 47 48 #include "zfs_namecheck.h" 49 #include "zfs_prop.h" 50 #include "libzfs_impl.h" 51 #include "zfs_comutil.h" 52 #include "zfeature_common.h" 53 54 static int read_efi_label(nvlist_t *, diskaddr_t *, boolean_t *); 55 static boolean_t zpool_vdev_is_interior(const char *name); 56 57 #define BACKUP_SLICE "s2" 58 59 typedef struct prop_flags { 60 int create:1; /* Validate property on creation */ 61 int import:1; /* Validate property on import */ 62 } prop_flags_t; 63 64 /* 65 * ==================================================================== 66 * zpool property functions 67 * ==================================================================== 68 */ 69 70 static int 71 zpool_get_all_props(zpool_handle_t *zhp) 72 { 73 zfs_cmd_t zc = { 0 }; 74 libzfs_handle_t *hdl = zhp->zpool_hdl; 75 76 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 77 78 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 79 return (-1); 80 81 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 82 if (errno == ENOMEM) { 83 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 84 zcmd_free_nvlists(&zc); 85 return (-1); 86 } 87 } else { 88 zcmd_free_nvlists(&zc); 89 return (-1); 90 } 91 } 92 93 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 94 zcmd_free_nvlists(&zc); 95 return (-1); 96 } 97 98 zcmd_free_nvlists(&zc); 99 100 return (0); 101 } 102 103 static int 104 zpool_props_refresh(zpool_handle_t *zhp) 105 { 106 nvlist_t *old_props; 107 108 old_props = zhp->zpool_props; 109 110 if (zpool_get_all_props(zhp) != 0) 111 return (-1); 112 113 nvlist_free(old_props); 114 return (0); 115 } 116 117 static char * 118 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 119 zprop_source_t *src) 120 { 121 nvlist_t *nv, *nvl; 122 uint64_t ival; 123 char *value; 124 zprop_source_t source; 125 126 nvl = zhp->zpool_props; 127 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 128 verify(nvlist_lookup_uint64(nv, 
ZPROP_SOURCE, &ival) == 0); 129 source = ival; 130 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 131 } else { 132 source = ZPROP_SRC_DEFAULT; 133 if ((value = (char *)zpool_prop_default_string(prop)) == NULL) 134 value = "-"; 135 } 136 137 if (src) 138 *src = source; 139 140 return (value); 141 } 142 143 uint64_t 144 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 145 { 146 nvlist_t *nv, *nvl; 147 uint64_t value; 148 zprop_source_t source; 149 150 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 151 /* 152 * zpool_get_all_props() has most likely failed because 153 * the pool is faulted, but if all we need is the top level 154 * vdev's guid then get it from the zhp config nvlist. 155 */ 156 if ((prop == ZPOOL_PROP_GUID) && 157 (nvlist_lookup_nvlist(zhp->zpool_config, 158 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 159 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 160 == 0)) { 161 return (value); 162 } 163 return (zpool_prop_default_numeric(prop)); 164 } 165 166 nvl = zhp->zpool_props; 167 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 168 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 169 source = value; 170 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 171 } else { 172 source = ZPROP_SRC_DEFAULT; 173 value = zpool_prop_default_numeric(prop); 174 } 175 176 if (src) 177 *src = source; 178 179 return (value); 180 } 181 182 /* 183 * Map VDEV STATE to printed strings. 184 */ 185 const char * 186 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 187 { 188 switch (state) { 189 case VDEV_STATE_CLOSED: 190 case VDEV_STATE_OFFLINE: 191 return (gettext("OFFLINE")); 192 case VDEV_STATE_REMOVED: 193 return (gettext("REMOVED")); 194 case VDEV_STATE_CANT_OPEN: 195 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 196 return (gettext("FAULTED")); 197 else if (aux == VDEV_AUX_SPLIT_POOL) 198 return (gettext("SPLIT")); 199 else 200 return (gettext("UNAVAIL")); 201 case VDEV_STATE_FAULTED: 202 return (gettext("FAULTED")); 203 case VDEV_STATE_DEGRADED: 204 return (gettext("DEGRADED")); 205 case VDEV_STATE_HEALTHY: 206 return (gettext("ONLINE")); 207 208 default: 209 break; 210 } 211 212 return (gettext("UNKNOWN")); 213 } 214 215 /* 216 * Map POOL STATE to printed strings. 217 */ 218 const char * 219 zpool_pool_state_to_name(pool_state_t state) 220 { 221 switch (state) { 222 case POOL_STATE_ACTIVE: 223 return (gettext("ACTIVE")); 224 case POOL_STATE_EXPORTED: 225 return (gettext("EXPORTED")); 226 case POOL_STATE_DESTROYED: 227 return (gettext("DESTROYED")); 228 case POOL_STATE_SPARE: 229 return (gettext("SPARE")); 230 case POOL_STATE_L2CACHE: 231 return (gettext("L2CACHE")); 232 case POOL_STATE_UNINITIALIZED: 233 return (gettext("UNINITIALIZED")); 234 case POOL_STATE_UNAVAIL: 235 return (gettext("UNAVAIL")); 236 case POOL_STATE_POTENTIALLY_ACTIVE: 237 return (gettext("POTENTIALLY_ACTIVE")); 238 } 239 240 return (gettext("UNKNOWN")); 241 } 242 243 /* 244 * Get a zpool property value for 'prop' and return the value in 245 * a pre-allocated buffer. 
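 *
 * Illustrative usage sketch (assumed caller, not part of this file): a
 * consumer holding an open zpool_handle_t 'zhp' might read the capacity
 * property like this.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf, sizeof (buf),
 *	    &src, B_FALSE) == 0)
 *		(void) printf("capacity: %s\n", buf);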
246 */ 247 int 248 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 249 zprop_source_t *srctype, boolean_t literal) 250 { 251 uint64_t intval; 252 const char *strval; 253 zprop_source_t src = ZPROP_SRC_NONE; 254 nvlist_t *nvroot; 255 vdev_stat_t *vs; 256 uint_t vsc; 257 258 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 259 switch (prop) { 260 case ZPOOL_PROP_NAME: 261 (void) strlcpy(buf, zpool_get_name(zhp), len); 262 break; 263 264 case ZPOOL_PROP_HEALTH: 265 (void) strlcpy(buf, "FAULTED", len); 266 break; 267 268 case ZPOOL_PROP_GUID: 269 intval = zpool_get_prop_int(zhp, prop, &src); 270 (void) snprintf(buf, len, "%llu", intval); 271 break; 272 273 case ZPOOL_PROP_ALTROOT: 274 case ZPOOL_PROP_CACHEFILE: 275 case ZPOOL_PROP_COMMENT: 276 if (zhp->zpool_props != NULL || 277 zpool_get_all_props(zhp) == 0) { 278 (void) strlcpy(buf, 279 zpool_get_prop_string(zhp, prop, &src), 280 len); 281 break; 282 } 283 /* FALLTHROUGH */ 284 default: 285 (void) strlcpy(buf, "-", len); 286 break; 287 } 288 289 if (srctype != NULL) 290 *srctype = src; 291 return (0); 292 } 293 294 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 295 prop != ZPOOL_PROP_NAME) 296 return (-1); 297 298 switch (zpool_prop_get_type(prop)) { 299 case PROP_TYPE_STRING: 300 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 301 len); 302 break; 303 304 case PROP_TYPE_NUMBER: 305 intval = zpool_get_prop_int(zhp, prop, &src); 306 307 switch (prop) { 308 case ZPOOL_PROP_SIZE: 309 case ZPOOL_PROP_ALLOCATED: 310 case ZPOOL_PROP_FREE: 311 case ZPOOL_PROP_FREEING: 312 case ZPOOL_PROP_LEAKED: 313 case ZPOOL_PROP_ASHIFT: 314 if (literal) { 315 (void) snprintf(buf, len, "%llu", 316 (u_longlong_t)intval); 317 } else { 318 (void) zfs_nicenum(intval, buf, len); 319 } 320 break; 321 case ZPOOL_PROP_BOOTSIZE: 322 case ZPOOL_PROP_EXPANDSZ: 323 case ZPOOL_PROP_CHECKPOINT: 324 if (intval == 0) { 325 (void) strlcpy(buf, "-", len); 326 } else if (literal) { 327 (void) snprintf(buf, len, "%llu", 328 (u_longlong_t)intval); 329 } else { 330 (void) zfs_nicenum(intval, buf, len); 331 } 332 break; 333 case ZPOOL_PROP_CAPACITY: 334 if (literal) { 335 (void) snprintf(buf, len, "%llu", 336 (u_longlong_t)intval); 337 } else { 338 (void) snprintf(buf, len, "%llu%%", 339 (u_longlong_t)intval); 340 } 341 break; 342 case ZPOOL_PROP_FRAGMENTATION: 343 if (intval == UINT64_MAX) { 344 (void) strlcpy(buf, "-", len); 345 } else { 346 (void) snprintf(buf, len, "%llu%%", 347 (u_longlong_t)intval); 348 } 349 break; 350 case ZPOOL_PROP_DEDUPRATIO: 351 (void) snprintf(buf, len, "%llu.%02llux", 352 (u_longlong_t)(intval / 100), 353 (u_longlong_t)(intval % 100)); 354 break; 355 case ZPOOL_PROP_HEALTH: 356 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 357 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 358 verify(nvlist_lookup_uint64_array(nvroot, 359 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 360 == 0); 361 362 (void) strlcpy(buf, zpool_state_to_name(intval, 363 vs->vs_aux), len); 364 break; 365 case ZPOOL_PROP_VERSION: 366 if (intval >= SPA_VERSION_FEATURES) { 367 (void) snprintf(buf, len, "-"); 368 break; 369 } 370 /* FALLTHROUGH */ 371 default: 372 (void) snprintf(buf, len, "%llu", intval); 373 } 374 break; 375 376 case PROP_TYPE_INDEX: 377 intval = zpool_get_prop_int(zhp, prop, &src); 378 if (zpool_prop_index_to_string(prop, intval, &strval) 379 != 0) 380 return (-1); 381 (void) strlcpy(buf, strval, len); 382 break; 383 384 default: 385 abort(); 386 } 387 388 if (srctype) 389 *srctype = src; 390 391 return (0); 392 
} 393 394 /* 395 * Check if the bootfs name has the same pool name as it is set to. 396 * Assuming bootfs is a valid dataset name. 397 */ 398 static boolean_t 399 bootfs_name_valid(const char *pool, const char *bootfs) 400 { 401 int len = strlen(pool); 402 if (bootfs[0] == '\0') 403 return (B_TRUE); 404 405 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 406 return (B_FALSE); 407 408 if (strncmp(pool, bootfs, len) == 0 && 409 (bootfs[len] == '/' || bootfs[len] == '\0')) 410 return (B_TRUE); 411 412 return (B_FALSE); 413 } 414 415 boolean_t 416 zpool_is_bootable(zpool_handle_t *zhp) 417 { 418 char bootfs[ZFS_MAX_DATASET_NAME_LEN]; 419 420 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 421 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-", 422 sizeof (bootfs)) != 0); 423 } 424 425 426 /* 427 * Given an nvlist of zpool properties to be set, validate that they are 428 * correct, and parse any numeric properties (index, boolean, etc) if they are 429 * specified as strings. 430 */ 431 static nvlist_t * 432 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 433 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 434 { 435 nvpair_t *elem; 436 nvlist_t *retprops; 437 zpool_prop_t prop; 438 char *strval; 439 uint64_t intval; 440 char *slash, *check; 441 struct stat64 statbuf; 442 zpool_handle_t *zhp; 443 444 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 445 (void) no_memory(hdl); 446 return (NULL); 447 } 448 449 elem = NULL; 450 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 451 const char *propname = nvpair_name(elem); 452 453 prop = zpool_name_to_prop(propname); 454 if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) { 455 int err; 456 char *fname = strchr(propname, '@') + 1; 457 458 err = zfeature_lookup_name(fname, NULL); 459 if (err != 0) { 460 ASSERT3U(err, ==, ENOENT); 461 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 462 "invalid feature '%s', '%s'"), fname, 463 propname); 464 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 465 goto error; 466 } 467 468 if (nvpair_type(elem) != DATA_TYPE_STRING) { 469 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 470 "'%s' must be a string"), propname); 471 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 472 goto error; 473 } 474 475 (void) nvpair_value_string(elem, &strval); 476 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 && 477 strcmp(strval, ZFS_FEATURE_DISABLED) != 0) { 478 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 479 "property '%s' can only be set to " 480 "'enabled' or 'disabled'"), propname); 481 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 482 goto error; 483 } 484 485 if (nvlist_add_uint64(retprops, propname, 0) != 0) { 486 (void) no_memory(hdl); 487 goto error; 488 } 489 continue; 490 } 491 492 /* 493 * Make sure this property is valid and applies to this type. 494 */ 495 if (prop == ZPOOL_PROP_INVAL) { 496 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 497 "invalid property '%s'"), propname); 498 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 499 goto error; 500 } 501 502 if (zpool_prop_readonly(prop)) { 503 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 504 "is readonly"), propname); 505 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 506 goto error; 507 } 508 509 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 510 &strval, &intval, errbuf) != 0) 511 goto error; 512 513 /* 514 * Perform additional checking for specific properties. 
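 *
 * Illustrative sketch (assumed caller-side input, not from the original
 * comment): by this point zprop_parse_value() above has already turned a
 * string pair into its numeric form, e.g. an nvlist the caller built as
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_string(props, "ashift", "12");
 *
 * arrives here with 'intval' set to 12, ready for the range check below.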
515 */ 516 switch (prop) { 517 case ZPOOL_PROP_VERSION: 518 if (intval < version || 519 !SPA_VERSION_IS_SUPPORTED(intval)) { 520 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 521 "property '%s' number %d is invalid."), 522 propname, intval); 523 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 524 goto error; 525 } 526 break; 527 528 case ZPOOL_PROP_BOOTSIZE: 529 if (!flags.create) { 530 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 531 "property '%s' can only be set during pool " 532 "creation"), propname); 533 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 534 goto error; 535 } 536 break; 537 538 case ZPOOL_PROP_ASHIFT: 539 if (intval != 0 && 540 (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) { 541 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 542 "invalid '%s=%d' property: only values " 543 "between %" PRId32 " and %" PRId32 " " 544 "are allowed.\n"), 545 propname, intval, ASHIFT_MIN, ASHIFT_MAX); 546 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 547 goto error; 548 } 549 break; 550 551 case ZPOOL_PROP_BOOTFS: 552 if (flags.create || flags.import) { 553 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 554 "property '%s' cannot be set at creation " 555 "or import time"), propname); 556 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 557 goto error; 558 } 559 560 if (version < SPA_VERSION_BOOTFS) { 561 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 562 "pool must be upgraded to support " 563 "'%s' property"), propname); 564 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 565 goto error; 566 } 567 568 /* 569 * bootfs property value has to be a dataset name and 570 * the dataset has to be in the same pool as it sets to. 571 */ 572 if (!bootfs_name_valid(poolname, strval)) { 573 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 574 "is an invalid name"), strval); 575 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 576 goto error; 577 } 578 579 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 580 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 581 "could not open pool '%s'"), poolname); 582 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 583 goto error; 584 } 585 zpool_close(zhp); 586 break; 587 588 case ZPOOL_PROP_ALTROOT: 589 if (!flags.create && !flags.import) { 590 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 591 "property '%s' can only be set during pool " 592 "creation or import"), propname); 593 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 594 goto error; 595 } 596 597 if (strval[0] != '/') { 598 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 599 "bad alternate root '%s'"), strval); 600 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 601 goto error; 602 } 603 break; 604 605 case ZPOOL_PROP_CACHEFILE: 606 if (strval[0] == '\0') 607 break; 608 609 if (strcmp(strval, "none") == 0) 610 break; 611 612 if (strval[0] != '/') { 613 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 614 "property '%s' must be empty, an " 615 "absolute path, or 'none'"), propname); 616 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 617 goto error; 618 } 619 620 slash = strrchr(strval, '/'); 621 622 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 623 strcmp(slash, "/..") == 0) { 624 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 625 "'%s' is not a valid file"), strval); 626 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 627 goto error; 628 } 629 630 *slash = '\0'; 631 632 if (strval[0] != '\0' && 633 (stat64(strval, &statbuf) != 0 || 634 !S_ISDIR(statbuf.st_mode))) { 635 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 636 "'%s' is not a valid directory"), 637 strval); 638 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 639 goto error; 640 } 641 642 *slash = '/'; 643 break; 644 645 case 
ZPOOL_PROP_COMMENT: 646 for (check = strval; *check != '\0'; check++) { 647 if (!isprint(*check)) { 648 zfs_error_aux(hdl, 649 dgettext(TEXT_DOMAIN, 650 "comment may only have printable " 651 "characters")); 652 (void) zfs_error(hdl, EZFS_BADPROP, 653 errbuf); 654 goto error; 655 } 656 } 657 if (strlen(strval) > ZPROP_MAX_COMMENT) { 658 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 659 "comment must not exceed %d characters"), 660 ZPROP_MAX_COMMENT); 661 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 662 goto error; 663 } 664 break; 665 666 case ZPOOL_PROP_READONLY: 667 if (!flags.import) { 668 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 669 "property '%s' can only be set at " 670 "import time"), propname); 671 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 672 goto error; 673 } 674 break; 675 676 case ZPOOL_PROP_TNAME: 677 if (!flags.create) { 678 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 679 "property '%s' can only be set at " 680 "creation time"), propname); 681 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 682 goto error; 683 } 684 break; 685 686 case ZPOOL_PROP_MULTIHOST: 687 if (get_system_hostid() == 0) { 688 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 689 "requires a non-zero system hostid")); 690 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 691 goto error; 692 } 693 break; 694 695 default: 696 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 697 "property '%s'(%d) not defined"), propname, prop); 698 break; 699 } 700 } 701 702 return (retprops); 703 error: 704 nvlist_free(retprops); 705 return (NULL); 706 } 707 708 /* 709 * Set zpool property : propname=propval. 710 */ 711 int 712 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 713 { 714 zfs_cmd_t zc = { 0 }; 715 int ret = -1; 716 char errbuf[1024]; 717 nvlist_t *nvl = NULL; 718 nvlist_t *realprops; 719 uint64_t version; 720 prop_flags_t flags = { 0 }; 721 722 (void) snprintf(errbuf, sizeof (errbuf), 723 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 724 zhp->zpool_name); 725 726 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 727 return (no_memory(zhp->zpool_hdl)); 728 729 if (nvlist_add_string(nvl, propname, propval) != 0) { 730 nvlist_free(nvl); 731 return (no_memory(zhp->zpool_hdl)); 732 } 733 734 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 735 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 736 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 737 nvlist_free(nvl); 738 return (-1); 739 } 740 741 nvlist_free(nvl); 742 nvl = realprops; 743 744 /* 745 * Execute the corresponding ioctl() to set this property. 
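 *
 * Illustrative caller sketch (assumed usage, not part of this file): a
 * typical libzfs consumer reaches this ioctl via, e.g.,
 *
 *	if (zpool_set_prop(zhp, "autoexpand", "on") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));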
746 */ 747 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 748 749 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 750 nvlist_free(nvl); 751 return (-1); 752 } 753 754 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 755 756 zcmd_free_nvlists(&zc); 757 nvlist_free(nvl); 758 759 if (ret) 760 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 761 else 762 (void) zpool_props_refresh(zhp); 763 764 return (ret); 765 } 766 767 int 768 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 769 { 770 libzfs_handle_t *hdl = zhp->zpool_hdl; 771 zprop_list_t *entry; 772 char buf[ZFS_MAXPROPLEN]; 773 nvlist_t *features = NULL; 774 zprop_list_t **last; 775 boolean_t firstexpand = (NULL == *plp); 776 777 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 778 return (-1); 779 780 last = plp; 781 while (*last != NULL) 782 last = &(*last)->pl_next; 783 784 if ((*plp)->pl_all) 785 features = zpool_get_features(zhp); 786 787 if ((*plp)->pl_all && firstexpand) { 788 for (int i = 0; i < SPA_FEATURES; i++) { 789 zprop_list_t *entry = zfs_alloc(hdl, 790 sizeof (zprop_list_t)); 791 entry->pl_prop = ZPROP_INVAL; 792 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s", 793 spa_feature_table[i].fi_uname); 794 entry->pl_width = strlen(entry->pl_user_prop); 795 entry->pl_all = B_TRUE; 796 797 *last = entry; 798 last = &entry->pl_next; 799 } 800 } 801 802 /* add any unsupported features */ 803 for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL); 804 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) { 805 char *propname; 806 boolean_t found; 807 zprop_list_t *entry; 808 809 if (zfeature_is_supported(nvpair_name(nvp))) 810 continue; 811 812 propname = zfs_asprintf(hdl, "unsupported@%s", 813 nvpair_name(nvp)); 814 815 /* 816 * Before adding the property to the list make sure that no 817 * other pool already added the same property. 818 */ 819 found = B_FALSE; 820 entry = *plp; 821 while (entry != NULL) { 822 if (entry->pl_user_prop != NULL && 823 strcmp(propname, entry->pl_user_prop) == 0) { 824 found = B_TRUE; 825 break; 826 } 827 entry = entry->pl_next; 828 } 829 if (found) { 830 free(propname); 831 continue; 832 } 833 834 entry = zfs_alloc(hdl, sizeof (zprop_list_t)); 835 entry->pl_prop = ZPROP_INVAL; 836 entry->pl_user_prop = propname; 837 entry->pl_width = strlen(entry->pl_user_prop); 838 entry->pl_all = B_TRUE; 839 840 *last = entry; 841 last = &entry->pl_next; 842 } 843 844 for (entry = *plp; entry != NULL; entry = entry->pl_next) { 845 846 if (entry->pl_fixed) 847 continue; 848 849 if (entry->pl_prop != ZPROP_INVAL && 850 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), 851 NULL, B_FALSE) == 0) { 852 if (strlen(buf) > entry->pl_width) 853 entry->pl_width = strlen(buf); 854 } 855 } 856 857 return (0); 858 } 859 860 /* 861 * Get the state for the given feature on the given ZFS pool. 862 */ 863 int 864 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf, 865 size_t len) 866 { 867 uint64_t refcount; 868 boolean_t found = B_FALSE; 869 nvlist_t *features = zpool_get_features(zhp); 870 boolean_t supported; 871 const char *feature = strchr(propname, '@') + 1; 872 873 supported = zpool_prop_feature(propname); 874 ASSERT(supported || zpool_prop_unsupported(propname)); 875 876 /* 877 * Convert from feature name to feature guid. This conversion is 878 * unecessary for unsupported@... properties because they already 879 * use guids. 
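 *
 * Illustrative usage sketch (assumed caller, not part of this file):
 *
 *	char state[64];
 *
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		(void) printf("async_destroy: %s\n", state);
 *
 * which would print "enabled", "active", or "disabled".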
880 */ 881 if (supported) { 882 int ret; 883 spa_feature_t fid; 884 885 ret = zfeature_lookup_name(feature, &fid); 886 if (ret != 0) { 887 (void) strlcpy(buf, "-", len); 888 return (ENOTSUP); 889 } 890 feature = spa_feature_table[fid].fi_guid; 891 } 892 893 if (nvlist_lookup_uint64(features, feature, &refcount) == 0) 894 found = B_TRUE; 895 896 if (supported) { 897 if (!found) { 898 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len); 899 } else { 900 if (refcount == 0) 901 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len); 902 else 903 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len); 904 } 905 } else { 906 if (found) { 907 if (refcount == 0) { 908 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE); 909 } else { 910 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY); 911 } 912 } else { 913 (void) strlcpy(buf, "-", len); 914 return (ENOTSUP); 915 } 916 } 917 918 return (0); 919 } 920 921 /* 922 * Don't start the slice at the default block of 34; many storage 923 * devices will use a stripe width of 128k, so start there instead. 924 */ 925 #define NEW_START_BLOCK 256 926 927 /* 928 * Validate the given pool name, optionally putting an extended error message in 929 * 'buf'. 930 */ 931 boolean_t 932 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 933 { 934 namecheck_err_t why; 935 char what; 936 int ret; 937 938 ret = pool_namecheck(pool, &why, &what); 939 940 /* 941 * The rules for reserved pool names were extended at a later point. 942 * But we need to support users with existing pools that may now be 943 * invalid. So we only check for this expanded set of names during a 944 * create (or import), and only in userland. 945 */ 946 if (ret == 0 && !isopen && 947 (strncmp(pool, "mirror", 6) == 0 || 948 strncmp(pool, "raidz", 5) == 0 || 949 strncmp(pool, "spare", 5) == 0 || 950 strcmp(pool, "log") == 0)) { 951 if (hdl != NULL) 952 zfs_error_aux(hdl, 953 dgettext(TEXT_DOMAIN, "name is reserved")); 954 return (B_FALSE); 955 } 956 957 958 if (ret != 0) { 959 if (hdl != NULL) { 960 switch (why) { 961 case NAME_ERR_TOOLONG: 962 zfs_error_aux(hdl, 963 dgettext(TEXT_DOMAIN, "name is too long")); 964 break; 965 966 case NAME_ERR_INVALCHAR: 967 zfs_error_aux(hdl, 968 dgettext(TEXT_DOMAIN, "invalid character " 969 "'%c' in pool name"), what); 970 break; 971 972 case NAME_ERR_NOLETTER: 973 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 974 "name must begin with a letter")); 975 break; 976 977 case NAME_ERR_RESERVED: 978 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 979 "name is reserved")); 980 break; 981 982 case NAME_ERR_DISKLIKE: 983 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 984 "pool name is reserved")); 985 break; 986 987 case NAME_ERR_LEADING_SLASH: 988 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 989 "leading slash in name")); 990 break; 991 992 case NAME_ERR_EMPTY_COMPONENT: 993 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 994 "empty component in name")); 995 break; 996 997 case NAME_ERR_TRAILING_SLASH: 998 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 999 "trailing slash in name")); 1000 break; 1001 1002 case NAME_ERR_MULTIPLE_DELIMITERS: 1003 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1004 "multiple '@' and/or '#' delimiters in " 1005 "name")); 1006 break; 1007 1008 default: 1009 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1010 "(%d) not defined"), why); 1011 break; 1012 } 1013 } 1014 return (B_FALSE); 1015 } 1016 1017 return (B_TRUE); 1018 } 1019 1020 /* 1021 * Open a handle to the given pool, even if the pool is currently in the FAULTED 1022 * state. 
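 *
 * Illustrative usage sketch (assumptions: an initialized libzfs handle
 * 'hdl' and a pool named "tank"):
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open_canfail(hdl, "tank")) != NULL) {
 *		... inspect properties even if the pool is FAULTED ...
 *		zpool_close(zhp);
 *	}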
1023 */ 1024 zpool_handle_t * 1025 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 1026 { 1027 zpool_handle_t *zhp; 1028 boolean_t missing; 1029 1030 /* 1031 * Make sure the pool name is valid. 1032 */ 1033 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 1034 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1035 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 1036 pool); 1037 return (NULL); 1038 } 1039 1040 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1041 return (NULL); 1042 1043 zhp->zpool_hdl = hdl; 1044 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1045 1046 if (zpool_refresh_stats(zhp, &missing) != 0) { 1047 zpool_close(zhp); 1048 return (NULL); 1049 } 1050 1051 if (missing) { 1052 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 1053 (void) zfs_error_fmt(hdl, EZFS_NOENT, 1054 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 1055 zpool_close(zhp); 1056 return (NULL); 1057 } 1058 1059 return (zhp); 1060 } 1061 1062 /* 1063 * Like the above, but silent on error. Used when iterating over pools (because 1064 * the configuration cache may be out of date). 1065 */ 1066 int 1067 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 1068 { 1069 zpool_handle_t *zhp; 1070 boolean_t missing; 1071 1072 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1073 return (-1); 1074 1075 zhp->zpool_hdl = hdl; 1076 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1077 1078 if (zpool_refresh_stats(zhp, &missing) != 0) { 1079 zpool_close(zhp); 1080 return (-1); 1081 } 1082 1083 if (missing) { 1084 zpool_close(zhp); 1085 *ret = NULL; 1086 return (0); 1087 } 1088 1089 *ret = zhp; 1090 return (0); 1091 } 1092 1093 /* 1094 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 1095 * state. 1096 */ 1097 zpool_handle_t * 1098 zpool_open(libzfs_handle_t *hdl, const char *pool) 1099 { 1100 zpool_handle_t *zhp; 1101 1102 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 1103 return (NULL); 1104 1105 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 1106 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 1107 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 1108 zpool_close(zhp); 1109 return (NULL); 1110 } 1111 1112 return (zhp); 1113 } 1114 1115 /* 1116 * Close the handle. Simply frees the memory associated with the handle. 1117 */ 1118 void 1119 zpool_close(zpool_handle_t *zhp) 1120 { 1121 nvlist_free(zhp->zpool_config); 1122 nvlist_free(zhp->zpool_old_config); 1123 nvlist_free(zhp->zpool_props); 1124 free(zhp); 1125 } 1126 1127 /* 1128 * Return the name of the pool. 1129 */ 1130 const char * 1131 zpool_get_name(zpool_handle_t *zhp) 1132 { 1133 return (zhp->zpool_name); 1134 } 1135 1136 1137 /* 1138 * Return the state of the pool (ACTIVE or UNAVAILABLE) 1139 */ 1140 int 1141 zpool_get_state(zpool_handle_t *zhp) 1142 { 1143 return (zhp->zpool_state); 1144 } 1145 1146 /* 1147 * Check if vdev list contains a special vdev 1148 */ 1149 static boolean_t 1150 zpool_has_special_vdev(nvlist_t *nvroot) 1151 { 1152 nvlist_t **child; 1153 uint_t children; 1154 1155 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child, 1156 &children) == 0) { 1157 for (uint_t c = 0; c < children; c++) { 1158 char *bias; 1159 1160 if (nvlist_lookup_string(child[c], 1161 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 && 1162 strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) { 1163 return (B_TRUE); 1164 } 1165 } 1166 } 1167 return (B_FALSE); 1168 } 1169 1170 /* 1171 * Create the named pool, using the provided vdev list. 
It is assumed 1172 * that the consumer has already validated the contents of the nvlist, so we 1173 * don't have to worry about error semantics. 1174 */ 1175 int 1176 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 1177 nvlist_t *props, nvlist_t *fsprops) 1178 { 1179 zfs_cmd_t zc = { 0 }; 1180 nvlist_t *zc_fsprops = NULL; 1181 nvlist_t *zc_props = NULL; 1182 nvlist_t *hidden_args = NULL; 1183 uint8_t *wkeydata = NULL; 1184 uint_t wkeylen = 0; 1185 char msg[1024]; 1186 int ret = -1; 1187 1188 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1189 "cannot create '%s'"), pool); 1190 1191 if (!zpool_name_valid(hdl, B_FALSE, pool)) 1192 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 1193 1194 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1195 return (-1); 1196 1197 if (props) { 1198 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 1199 1200 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 1201 SPA_VERSION_1, flags, msg)) == NULL) { 1202 goto create_failed; 1203 } 1204 } 1205 1206 if (fsprops) { 1207 uint64_t zoned; 1208 char *zonestr; 1209 1210 zoned = ((nvlist_lookup_string(fsprops, 1211 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 1212 strcmp(zonestr, "on") == 0); 1213 1214 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM, 1215 fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) { 1216 goto create_failed; 1217 } 1218 1219 if (nvlist_exists(zc_fsprops, 1220 zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) && 1221 !zpool_has_special_vdev(nvroot)) { 1222 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1223 "%s property requires a special vdev"), 1224 zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)); 1225 (void) zfs_error(hdl, EZFS_BADPROP, msg); 1226 goto create_failed; 1227 } 1228 1229 if (!zc_props && 1230 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 1231 goto create_failed; 1232 } 1233 if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE, 1234 &wkeydata, &wkeylen) != 0) { 1235 (void) zfs_error(hdl, EZFS_CRYPTOFAILED, msg); 1236 goto create_failed; 1237 } 1238 if (nvlist_add_nvlist(zc_props, 1239 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 1240 goto create_failed; 1241 } 1242 if (wkeydata != NULL) { 1243 if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0) 1244 goto create_failed; 1245 1246 if (nvlist_add_uint8_array(hidden_args, "wkeydata", 1247 wkeydata, wkeylen) != 0) 1248 goto create_failed; 1249 1250 if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS, 1251 hidden_args) != 0) 1252 goto create_failed; 1253 } 1254 } 1255 1256 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 1257 goto create_failed; 1258 1259 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 1260 1261 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 1262 1263 zcmd_free_nvlists(&zc); 1264 nvlist_free(zc_props); 1265 nvlist_free(zc_fsprops); 1266 nvlist_free(hidden_args); 1267 if (wkeydata != NULL) 1268 free(wkeydata); 1269 1270 switch (errno) { 1271 case EBUSY: 1272 /* 1273 * This can happen if the user has specified the same 1274 * device multiple times. We can't reliably detect this 1275 * until we try to add it and see we already have a 1276 * label. 1277 */ 1278 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1279 "one or more vdevs refer to the same device")); 1280 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1281 1282 case ERANGE: 1283 /* 1284 * This happens if the record size is smaller or larger 1285 * than the allowed size range, or not a power of 2. 
1286 * 1287 * NOTE: although zfs_valid_proplist is called earlier, 1288 * this case may have slipped through since the 1289 * pool does not exist yet and it is therefore 1290 * impossible to read properties e.g. max blocksize 1291 * from the pool. 1292 */ 1293 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1294 "record size invalid")); 1295 return (zfs_error(hdl, EZFS_BADPROP, msg)); 1296 1297 case EOVERFLOW: 1298 /* 1299 * This occurs when one of the devices is below 1300 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1301 * device was the problem device since there's no 1302 * reliable way to determine device size from userland. 1303 */ 1304 { 1305 char buf[64]; 1306 1307 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1308 1309 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1310 "one or more devices is less than the " 1311 "minimum size (%s)"), buf); 1312 } 1313 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1314 1315 case ENOSPC: 1316 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1317 "one or more devices is out of space")); 1318 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1319 1320 default: 1321 return (zpool_standard_error(hdl, errno, msg)); 1322 } 1323 } 1324 1325 create_failed: 1326 zcmd_free_nvlists(&zc); 1327 nvlist_free(zc_props); 1328 nvlist_free(zc_fsprops); 1329 nvlist_free(hidden_args); 1330 if (wkeydata != NULL) 1331 free(wkeydata); 1332 return (ret); 1333 } 1334 1335 /* 1336 * Destroy the given pool. It is up to the caller to ensure that there are no 1337 * datasets left in the pool. 1338 */ 1339 int 1340 zpool_destroy(zpool_handle_t *zhp, const char *log_str) 1341 { 1342 zfs_cmd_t zc = { 0 }; 1343 zfs_handle_t *zfp = NULL; 1344 libzfs_handle_t *hdl = zhp->zpool_hdl; 1345 char msg[1024]; 1346 1347 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1348 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1349 return (-1); 1350 1351 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1352 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1353 1354 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1355 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1356 "cannot destroy '%s'"), zhp->zpool_name); 1357 1358 if (errno == EROFS) { 1359 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1360 "one or more devices is read only")); 1361 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1362 } else { 1363 (void) zpool_standard_error(hdl, errno, msg); 1364 } 1365 1366 if (zfp) 1367 zfs_close(zfp); 1368 return (-1); 1369 } 1370 1371 if (zfp) { 1372 remove_mountpoint(zfp); 1373 zfs_close(zfp); 1374 } 1375 1376 return (0); 1377 } 1378 1379 /* 1380 * Create a checkpoint in the given pool. 1381 */ 1382 int 1383 zpool_checkpoint(zpool_handle_t *zhp) 1384 { 1385 libzfs_handle_t *hdl = zhp->zpool_hdl; 1386 char msg[1024]; 1387 int error; 1388 1389 error = lzc_pool_checkpoint(zhp->zpool_name); 1390 if (error != 0) { 1391 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1392 "cannot checkpoint '%s'"), zhp->zpool_name); 1393 (void) zpool_standard_error(hdl, error, msg); 1394 return (-1); 1395 } 1396 1397 return (0); 1398 } 1399 1400 /* 1401 * Discard the checkpoint from the given pool. 
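 *
 * Illustrative usage sketch (assumed caller, pairing this with
 * zpool_checkpoint() above):
 *
 *	if (zpool_checkpoint(zhp) == 0) {
 *		... perform the risky change, verify the result ...
 *		(void) zpool_discard_checkpoint(zhp);
 *	}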
1402 */ 1403 int 1404 zpool_discard_checkpoint(zpool_handle_t *zhp) 1405 { 1406 libzfs_handle_t *hdl = zhp->zpool_hdl; 1407 char msg[1024]; 1408 int error; 1409 1410 error = lzc_pool_checkpoint_discard(zhp->zpool_name); 1411 if (error != 0) { 1412 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1413 "cannot discard checkpoint in '%s'"), zhp->zpool_name); 1414 (void) zpool_standard_error(hdl, error, msg); 1415 return (-1); 1416 } 1417 1418 return (0); 1419 } 1420 1421 /* 1422 * Add the given vdevs to the pool. The caller must have already performed the 1423 * necessary verification to ensure that the vdev specification is well-formed. 1424 */ 1425 int 1426 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) 1427 { 1428 zfs_cmd_t zc = { 0 }; 1429 int ret; 1430 libzfs_handle_t *hdl = zhp->zpool_hdl; 1431 char msg[1024]; 1432 nvlist_t **spares, **l2cache; 1433 uint_t nspares, nl2cache; 1434 1435 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1436 "cannot add to '%s'"), zhp->zpool_name); 1437 1438 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1439 SPA_VERSION_SPARES && 1440 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1441 &spares, &nspares) == 0) { 1442 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1443 "upgraded to add hot spares")); 1444 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1445 } 1446 1447 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1448 SPA_VERSION_L2CACHE && 1449 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1450 &l2cache, &nl2cache) == 0) { 1451 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1452 "upgraded to add cache devices")); 1453 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1454 } 1455 1456 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1457 return (-1); 1458 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1459 1460 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { 1461 switch (errno) { 1462 case EBUSY: 1463 /* 1464 * This can happen if the user has specified the same 1465 * device multiple times. We can't reliably detect this 1466 * until we try to add it and see we already have a 1467 * label. 1468 */ 1469 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1470 "one or more vdevs refer to the same device")); 1471 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1472 break; 1473 1474 case EINVAL: 1475 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1476 "invalid config; a pool with removing/removed " 1477 "vdevs does not support adding raidz vdevs")); 1478 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1479 break; 1480 1481 case EOVERFLOW: 1482 /* 1483 * This occurrs when one of the devices is below 1484 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1485 * device was the problem device since there's no 1486 * reliable way to determine device size from userland. 1487 */ 1488 { 1489 char buf[64]; 1490 1491 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1492 1493 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1494 "device is less than the minimum " 1495 "size (%s)"), buf); 1496 } 1497 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1498 break; 1499 1500 case ENOTSUP: 1501 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1502 "pool must be upgraded to add these vdevs")); 1503 (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1504 break; 1505 1506 default: 1507 (void) zpool_standard_error(hdl, errno, msg); 1508 } 1509 1510 ret = -1; 1511 } else { 1512 ret = 0; 1513 } 1514 1515 zcmd_free_nvlists(&zc); 1516 1517 return (ret); 1518 } 1519 1520 /* 1521 * Exports the pool from the system. 
The caller must ensure that there are no 1522 * mounted datasets in the pool. 1523 */ 1524 static int 1525 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce, 1526 const char *log_str) 1527 { 1528 zfs_cmd_t zc = { 0 }; 1529 char msg[1024]; 1530 1531 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1532 "cannot export '%s'"), zhp->zpool_name); 1533 1534 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1535 zc.zc_cookie = force; 1536 zc.zc_guid = hardforce; 1537 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1538 1539 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1540 switch (errno) { 1541 case EXDEV: 1542 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1543 "use '-f' to override the following errors:\n" 1544 "'%s' has an active shared spare which could be" 1545 " used by other pools once '%s' is exported."), 1546 zhp->zpool_name, zhp->zpool_name); 1547 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1548 msg)); 1549 default: 1550 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1551 msg)); 1552 } 1553 } 1554 1555 return (0); 1556 } 1557 1558 int 1559 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str) 1560 { 1561 return (zpool_export_common(zhp, force, B_FALSE, log_str)); 1562 } 1563 1564 int 1565 zpool_export_force(zpool_handle_t *zhp, const char *log_str) 1566 { 1567 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str)); 1568 } 1569 1570 static void 1571 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, 1572 nvlist_t *config) 1573 { 1574 nvlist_t *nv = NULL; 1575 uint64_t rewindto; 1576 int64_t loss = -1; 1577 struct tm t; 1578 char timestr[128]; 1579 1580 if (!hdl->libzfs_printerr || config == NULL) 1581 return; 1582 1583 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1584 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) { 1585 return; 1586 } 1587 1588 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1589 return; 1590 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1591 1592 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1593 strftime(timestr, 128, 0, &t) != 0) { 1594 if (dryrun) { 1595 (void) printf(dgettext(TEXT_DOMAIN, 1596 "Would be able to return %s " 1597 "to its state as of %s.\n"), 1598 name, timestr); 1599 } else { 1600 (void) printf(dgettext(TEXT_DOMAIN, 1601 "Pool %s returned to its state as of %s.\n"), 1602 name, timestr); 1603 } 1604 if (loss > 120) { 1605 (void) printf(dgettext(TEXT_DOMAIN, 1606 "%s approximately %lld "), 1607 dryrun ? "Would discard" : "Discarded", 1608 (loss + 30) / 60); 1609 (void) printf(dgettext(TEXT_DOMAIN, 1610 "minutes of transactions.\n")); 1611 } else if (loss > 0) { 1612 (void) printf(dgettext(TEXT_DOMAIN, 1613 "%s approximately %lld "), 1614 dryrun ? 
"Would discard" : "Discarded", loss); 1615 (void) printf(dgettext(TEXT_DOMAIN, 1616 "seconds of transactions.\n")); 1617 } 1618 } 1619 } 1620 1621 void 1622 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1623 nvlist_t *config) 1624 { 1625 nvlist_t *nv = NULL; 1626 int64_t loss = -1; 1627 uint64_t edata = UINT64_MAX; 1628 uint64_t rewindto; 1629 struct tm t; 1630 char timestr[128]; 1631 1632 if (!hdl->libzfs_printerr) 1633 return; 1634 1635 if (reason >= 0) 1636 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1637 else 1638 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1639 1640 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1641 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1642 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 || 1643 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1644 goto no_info; 1645 1646 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1647 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1648 &edata); 1649 1650 (void) printf(dgettext(TEXT_DOMAIN, 1651 "Recovery is possible, but will result in some data loss.\n")); 1652 1653 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1654 strftime(timestr, 128, 0, &t) != 0) { 1655 (void) printf(dgettext(TEXT_DOMAIN, 1656 "\tReturning the pool to its state as of %s\n" 1657 "\tshould correct the problem. "), 1658 timestr); 1659 } else { 1660 (void) printf(dgettext(TEXT_DOMAIN, 1661 "\tReverting the pool to an earlier state " 1662 "should correct the problem.\n\t")); 1663 } 1664 1665 if (loss > 120) { 1666 (void) printf(dgettext(TEXT_DOMAIN, 1667 "Approximately %lld minutes of data\n" 1668 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1669 } else if (loss > 0) { 1670 (void) printf(dgettext(TEXT_DOMAIN, 1671 "Approximately %lld seconds of data\n" 1672 "\tmust be discarded, irreversibly. "), loss); 1673 } 1674 if (edata != 0 && edata != UINT64_MAX) { 1675 if (edata == 1) { 1676 (void) printf(dgettext(TEXT_DOMAIN, 1677 "After rewind, at least\n" 1678 "\tone persistent user-data error will remain. ")); 1679 } else { 1680 (void) printf(dgettext(TEXT_DOMAIN, 1681 "After rewind, several\n" 1682 "\tpersistent user-data errors will remain. ")); 1683 } 1684 } 1685 (void) printf(dgettext(TEXT_DOMAIN, 1686 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1687 reason >= 0 ? "clear" : "import", name); 1688 1689 (void) printf(dgettext(TEXT_DOMAIN, 1690 "A scrub of the pool\n" 1691 "\tis strongly recommended after recovery.\n")); 1692 return; 1693 1694 no_info: 1695 (void) printf(dgettext(TEXT_DOMAIN, 1696 "Destroy and re-create the pool from\n\ta backup source.\n")); 1697 } 1698 1699 /* 1700 * zpool_import() is a contracted interface. Should be kept the same 1701 * if possible. 1702 * 1703 * Applications should use zpool_import_props() to import a pool with 1704 * new properties value to be set. 
1705 */ 1706 int 1707 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1708 char *altroot) 1709 { 1710 nvlist_t *props = NULL; 1711 int ret; 1712 1713 if (altroot != NULL) { 1714 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1715 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1716 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1717 newname)); 1718 } 1719 1720 if (nvlist_add_string(props, 1721 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1722 nvlist_add_string(props, 1723 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1724 nvlist_free(props); 1725 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1726 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1727 newname)); 1728 } 1729 } 1730 1731 ret = zpool_import_props(hdl, config, newname, props, 1732 ZFS_IMPORT_NORMAL); 1733 nvlist_free(props); 1734 return (ret); 1735 } 1736 1737 static void 1738 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1739 int indent) 1740 { 1741 nvlist_t **child; 1742 uint_t c, children; 1743 char *vname; 1744 uint64_t is_log = 0; 1745 1746 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1747 &is_log); 1748 1749 if (name != NULL) 1750 (void) printf("\t%*s%s%s\n", indent, "", name, 1751 is_log ? " [log]" : ""); 1752 1753 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1754 &child, &children) != 0) 1755 return; 1756 1757 for (c = 0; c < children; c++) { 1758 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID); 1759 print_vdev_tree(hdl, vname, child[c], indent + 2); 1760 free(vname); 1761 } 1762 } 1763 1764 void 1765 zpool_print_unsup_feat(nvlist_t *config) 1766 { 1767 nvlist_t *nvinfo, *unsup_feat; 1768 1769 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 1770 0); 1771 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT, 1772 &unsup_feat) == 0); 1773 1774 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL; 1775 nvp = nvlist_next_nvpair(unsup_feat, nvp)) { 1776 char *desc; 1777 1778 verify(nvpair_type(nvp) == DATA_TYPE_STRING); 1779 verify(nvpair_value_string(nvp, &desc) == 0); 1780 1781 if (strlen(desc) > 0) 1782 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc); 1783 else 1784 (void) printf("\t%s\n", nvpair_name(nvp)); 1785 } 1786 } 1787 1788 /* 1789 * Import the given pool using the known configuration and a list of 1790 * properties to be set. The configuration should have come from 1791 * zpool_find_import(). The 'newname' parameters control whether the pool 1792 * is imported with a different name. 
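 *
 * Illustrative usage sketch (assumed caller; 'config' comes from
 * zpool_find_import(), and the simpler zpool_import() wrapper above ends
 * up here as well):
 *
 *	nvlist_t *props = fnvlist_alloc();
 *
 *	fnvlist_add_string(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_READONLY), "on");
 *	ret = zpool_import_props(hdl, config, NULL, props,
 *	    ZFS_IMPORT_NORMAL);
 *	nvlist_free(props);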
1793 */ 1794 int 1795 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1796 nvlist_t *props, int flags) 1797 { 1798 zfs_cmd_t zc = { 0 }; 1799 zpool_load_policy_t policy; 1800 nvlist_t *nv = NULL; 1801 nvlist_t *nvinfo = NULL; 1802 nvlist_t *missing = NULL; 1803 char *thename; 1804 char *origname; 1805 int ret; 1806 int error = 0; 1807 char errbuf[1024]; 1808 1809 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1810 &origname) == 0); 1811 1812 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1813 "cannot import pool '%s'"), origname); 1814 1815 if (newname != NULL) { 1816 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1817 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1818 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1819 newname)); 1820 thename = (char *)newname; 1821 } else { 1822 thename = origname; 1823 } 1824 1825 if (props != NULL) { 1826 uint64_t version; 1827 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1828 1829 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1830 &version) == 0); 1831 1832 if ((props = zpool_valid_proplist(hdl, origname, 1833 props, version, flags, errbuf)) == NULL) 1834 return (-1); 1835 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1836 nvlist_free(props); 1837 return (-1); 1838 } 1839 nvlist_free(props); 1840 } 1841 1842 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1843 1844 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1845 &zc.zc_guid) == 0); 1846 1847 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1848 zcmd_free_nvlists(&zc); 1849 return (-1); 1850 } 1851 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1852 zcmd_free_nvlists(&zc); 1853 return (-1); 1854 } 1855 1856 zc.zc_cookie = flags; 1857 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1858 errno == ENOMEM) { 1859 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1860 zcmd_free_nvlists(&zc); 1861 return (-1); 1862 } 1863 } 1864 if (ret != 0) 1865 error = errno; 1866 1867 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1868 1869 zcmd_free_nvlists(&zc); 1870 1871 zpool_get_load_policy(config, &policy); 1872 1873 if (error) { 1874 char desc[1024]; 1875 char aux[256]; 1876 1877 /* 1878 * Dry-run failed, but we print out what success 1879 * looks like if we found a best txg 1880 */ 1881 if (policy.zlp_rewind & ZPOOL_TRY_REWIND) { 1882 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1883 B_TRUE, nv); 1884 nvlist_free(nv); 1885 return (-1); 1886 } 1887 1888 if (newname == NULL) 1889 (void) snprintf(desc, sizeof (desc), 1890 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1891 thename); 1892 else 1893 (void) snprintf(desc, sizeof (desc), 1894 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1895 origname, thename); 1896 1897 switch (error) { 1898 case ENOTSUP: 1899 if (nv != NULL && nvlist_lookup_nvlist(nv, 1900 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1901 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) { 1902 (void) printf(dgettext(TEXT_DOMAIN, "This " 1903 "pool uses the following feature(s) not " 1904 "supported by this system:\n")); 1905 zpool_print_unsup_feat(nv); 1906 if (nvlist_exists(nvinfo, 1907 ZPOOL_CONFIG_CAN_RDONLY)) { 1908 (void) printf(dgettext(TEXT_DOMAIN, 1909 "All unsupported features are only " 1910 "required for writing to the pool." 1911 "\nThe pool can be imported using " 1912 "'-o readonly=on'.\n")); 1913 } 1914 } 1915 /* 1916 * Unsupported version. 
1917 */ 1918 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1919 break; 1920 1921 case EREMOTEIO: 1922 if (nv != NULL && nvlist_lookup_nvlist(nv, 1923 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) { 1924 char *hostname = "<unknown>"; 1925 uint64_t hostid = 0; 1926 mmp_state_t mmp_state; 1927 1928 mmp_state = fnvlist_lookup_uint64(nvinfo, 1929 ZPOOL_CONFIG_MMP_STATE); 1930 1931 if (nvlist_exists(nvinfo, 1932 ZPOOL_CONFIG_MMP_HOSTNAME)) 1933 hostname = fnvlist_lookup_string(nvinfo, 1934 ZPOOL_CONFIG_MMP_HOSTNAME); 1935 1936 if (nvlist_exists(nvinfo, 1937 ZPOOL_CONFIG_MMP_HOSTID)) 1938 hostid = fnvlist_lookup_uint64(nvinfo, 1939 ZPOOL_CONFIG_MMP_HOSTID); 1940 1941 if (mmp_state == MMP_STATE_ACTIVE) { 1942 (void) snprintf(aux, sizeof (aux), 1943 dgettext(TEXT_DOMAIN, "pool is imp" 1944 "orted on host '%s' (hostid=%lx).\n" 1945 "Export the pool on the other " 1946 "system, then run 'zpool import'."), 1947 hostname, (unsigned long) hostid); 1948 } else if (mmp_state == MMP_STATE_NO_HOSTID) { 1949 (void) snprintf(aux, sizeof (aux), 1950 dgettext(TEXT_DOMAIN, "pool has " 1951 "the multihost property on and " 1952 "the\nsystem's hostid is not " 1953 "set.\n")); 1954 } 1955 1956 (void) zfs_error_aux(hdl, aux); 1957 } 1958 (void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc); 1959 break; 1960 1961 case EINVAL: 1962 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1963 break; 1964 1965 case EROFS: 1966 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1967 "one or more devices is read only")); 1968 (void) zfs_error(hdl, EZFS_BADDEV, desc); 1969 break; 1970 1971 case ENXIO: 1972 if (nv && nvlist_lookup_nvlist(nv, 1973 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1974 nvlist_lookup_nvlist(nvinfo, 1975 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1976 (void) printf(dgettext(TEXT_DOMAIN, 1977 "The devices below are missing or " 1978 "corrupted, use '-m' to import the pool " 1979 "anyway:\n")); 1980 print_vdev_tree(hdl, NULL, missing, 2); 1981 (void) printf("\n"); 1982 } 1983 (void) zpool_standard_error(hdl, error, desc); 1984 break; 1985 1986 case EEXIST: 1987 (void) zpool_standard_error(hdl, error, desc); 1988 break; 1989 case ENAMETOOLONG: 1990 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1991 "new name of at least one dataset is longer than " 1992 "the maximum allowable length")); 1993 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc); 1994 break; 1995 default: 1996 (void) zpool_standard_error(hdl, error, desc); 1997 zpool_explain_recover(hdl, 1998 newname ? origname : thename, -error, nv); 1999 break; 2000 } 2001 2002 nvlist_free(nv); 2003 ret = -1; 2004 } else { 2005 zpool_handle_t *zhp; 2006 2007 /* 2008 * This should never fail, but play it safe anyway. 2009 */ 2010 if (zpool_open_silent(hdl, thename, &zhp) != 0) 2011 ret = -1; 2012 else if (zhp != NULL) 2013 zpool_close(zhp); 2014 if (policy.zlp_rewind & 2015 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 2016 zpool_rewind_exclaim(hdl, newname ? origname : thename, 2017 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv); 2018 } 2019 nvlist_free(nv); 2020 return (0); 2021 } 2022 2023 return (ret); 2024 } 2025 2026 /* 2027 * Translate vdev names to guids. If a vdev_path is determined to be 2028 * unsuitable then a vd_errlist is allocated and the vdev path and errno 2029 * are added to it. 
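 *
 * Illustrative sketch (assumed usage; zpool_initialize() and zpool_trim()
 * below pass user-supplied vdev names through this helper): 'vds' is
 * keyed by vdev name, e.g.
 *
 *	nvlist_t *vds = fnvlist_alloc();
 *	fnvlist_add_boolean(vds, "c1t0d0");
 *
 * and on success each name is added to 'vdev_guids' with its guid, while
 * 'guids_to_paths' maps the guid (printed as a decimal string) back to
 * the name for error reporting.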
2030 */ 2031 static int 2032 zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds, 2033 nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist) 2034 { 2035 nvlist_t *errlist = NULL; 2036 int error = 0; 2037 2038 for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL; 2039 elem = nvlist_next_nvpair(vds, elem)) { 2040 boolean_t spare, cache; 2041 2042 char *vd_path = nvpair_name(elem); 2043 nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache, 2044 NULL); 2045 2046 if ((tgt == NULL) || cache || spare) { 2047 if (errlist == NULL) { 2048 errlist = fnvlist_alloc(); 2049 error = EINVAL; 2050 } 2051 2052 uint64_t err = (tgt == NULL) ? EZFS_NODEVICE : 2053 (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE); 2054 fnvlist_add_int64(errlist, vd_path, err); 2055 continue; 2056 } 2057 2058 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 2059 fnvlist_add_uint64(vdev_guids, vd_path, guid); 2060 2061 char msg[MAXNAMELEN]; 2062 (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid); 2063 fnvlist_add_string(guids_to_paths, msg, vd_path); 2064 } 2065 2066 if (error != 0) { 2067 verify(errlist != NULL); 2068 if (vd_errlist != NULL) 2069 *vd_errlist = errlist; 2070 else 2071 fnvlist_free(errlist); 2072 } 2073 2074 return (error); 2075 } 2076 2077 /* 2078 * Scan the pool. 2079 */ 2080 int 2081 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd) 2082 { 2083 zfs_cmd_t zc = { 0 }; 2084 char msg[1024]; 2085 int err; 2086 libzfs_handle_t *hdl = zhp->zpool_hdl; 2087 2088 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2089 zc.zc_cookie = func; 2090 zc.zc_flags = cmd; 2091 2092 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0) 2093 return (0); 2094 2095 err = errno; 2096 2097 /* ECANCELED on a scrub means we resumed a paused scrub */ 2098 if (err == ECANCELED && func == POOL_SCAN_SCRUB && 2099 cmd == POOL_SCRUB_NORMAL) 2100 return (0); 2101 2102 if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL) 2103 return (0); 2104 2105 if (func == POOL_SCAN_SCRUB) { 2106 if (cmd == POOL_SCRUB_PAUSE) { 2107 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2108 "cannot pause scrubbing %s"), zc.zc_name); 2109 } else { 2110 assert(cmd == POOL_SCRUB_NORMAL); 2111 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2112 "cannot scrub %s"), zc.zc_name); 2113 } 2114 } else if (func == POOL_SCAN_RESILVER) { 2115 assert(cmd == POOL_SCRUB_NORMAL); 2116 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2117 "cannot restart resilver on %s"), zc.zc_name); 2118 } else if (func == POOL_SCAN_NONE) { 2119 (void) snprintf(msg, sizeof (msg), 2120 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 2121 zc.zc_name); 2122 } else { 2123 assert(!"unexpected result"); 2124 } 2125 2126 if (err == EBUSY) { 2127 nvlist_t *nvroot; 2128 pool_scan_stat_t *ps = NULL; 2129 uint_t psc; 2130 2131 verify(nvlist_lookup_nvlist(zhp->zpool_config, 2132 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 2133 (void) nvlist_lookup_uint64_array(nvroot, 2134 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 2135 if (ps && ps->pss_func == POOL_SCAN_SCRUB) { 2136 if (cmd == POOL_SCRUB_PAUSE) 2137 return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg)); 2138 else 2139 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 2140 } else { 2141 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 2142 } 2143 } else if (err == ENOENT) { 2144 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 2145 } else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) { 2146 return (zfs_error(hdl, 
EZFS_NO_RESILVER_DEFER, msg)); 2147 } else { 2148 return (zpool_standard_error(hdl, err, msg)); 2149 } 2150 } 2151 2152 static int 2153 xlate_init_err(int err) 2154 { 2155 switch (err) { 2156 case ENODEV: 2157 return (EZFS_NODEVICE); 2158 case EINVAL: 2159 case EROFS: 2160 return (EZFS_BADDEV); 2161 case EBUSY: 2162 return (EZFS_INITIALIZING); 2163 case ESRCH: 2164 return (EZFS_NO_INITIALIZE); 2165 } 2166 return (err); 2167 } 2168 2169 /* 2170 * Begin, suspend, or cancel the initialization (initializing of all free 2171 * blocks) for the given vdevs in the given pool. 2172 */ 2173 int 2174 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type, 2175 nvlist_t *vds) 2176 { 2177 char msg[1024]; 2178 int err; 2179 2180 nvlist_t *vdev_guids = fnvlist_alloc(); 2181 nvlist_t *guids_to_paths = fnvlist_alloc(); 2182 nvlist_t *vd_errlist = NULL; 2183 nvlist_t *errlist; 2184 nvpair_t *elem; 2185 2186 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids, 2187 guids_to_paths, &vd_errlist); 2188 2189 if (err == 0) { 2190 err = lzc_initialize(zhp->zpool_name, cmd_type, 2191 vdev_guids, &errlist); 2192 if (err == 0) { 2193 fnvlist_free(vdev_guids); 2194 fnvlist_free(guids_to_paths); 2195 return (0); 2196 } 2197 2198 if (errlist != NULL) { 2199 vd_errlist = fnvlist_lookup_nvlist(errlist, 2200 ZPOOL_INITIALIZE_VDEVS); 2201 } 2202 2203 (void) snprintf(msg, sizeof (msg), 2204 dgettext(TEXT_DOMAIN, "operation failed")); 2205 } else { 2206 verify(vd_errlist != NULL); 2207 } 2208 2209 for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL; 2210 elem = nvlist_next_nvpair(vd_errlist, elem)) { 2211 int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem)); 2212 char *path; 2213 2214 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem), 2215 &path) != 0) 2216 path = nvpair_name(elem); 2217 2218 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error, 2219 "cannot initialize '%s'", path); 2220 } 2221 2222 fnvlist_free(vdev_guids); 2223 fnvlist_free(guids_to_paths); 2224 2225 if (vd_errlist != NULL) { 2226 fnvlist_free(vd_errlist); 2227 return (-1); 2228 } 2229 2230 return (zpool_standard_error(zhp->zpool_hdl, err, msg)); 2231 } 2232 2233 static int 2234 xlate_trim_err(int err) 2235 { 2236 switch (err) { 2237 case ENODEV: 2238 return (EZFS_NODEVICE); 2239 case EINVAL: 2240 case EROFS: 2241 return (EZFS_BADDEV); 2242 case EBUSY: 2243 return (EZFS_TRIMMING); 2244 case ESRCH: 2245 return (EZFS_NO_TRIM); 2246 case EOPNOTSUPP: 2247 return (EZFS_TRIM_NOTSUP); 2248 } 2249 return (err); 2250 } 2251 2252 /* 2253 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for 2254 * the given vdevs in the given pool. 
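 * The 'vds' nvlist is keyed by vdev name, in any form accepted by
 * zpool_find_vdev(); the pair values are ignored.  A minimal caller
 * sketch (illustrative only; the pool_trim_func_t constant and the
 * device name below are placeholders, not part of this file):
 *
 *	nvlist_t *vds = fnvlist_alloc();
 *	fnvlist_add_boolean(vds, "c0t0d0");
 *	error = zpool_trim(zhp, POOL_TRIM_START, vds, &trim_flags);
 *	fnvlist_free(vds);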
2255 */ 2256 int 2257 zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds, 2258 trimflags_t *trim_flags) 2259 { 2260 char msg[1024]; 2261 int err; 2262 2263 nvlist_t *vdev_guids = fnvlist_alloc(); 2264 nvlist_t *guids_to_paths = fnvlist_alloc(); 2265 nvlist_t *vd_errlist = NULL; 2266 nvlist_t *errlist; 2267 nvpair_t *elem; 2268 2269 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids, 2270 guids_to_paths, &vd_errlist); 2271 if (err == 0) { 2272 err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate, 2273 trim_flags->secure, vdev_guids, &errlist); 2274 if (err == 0) { 2275 fnvlist_free(vdev_guids); 2276 fnvlist_free(guids_to_paths); 2277 return (0); 2278 } 2279 2280 if (errlist != NULL) { 2281 vd_errlist = fnvlist_lookup_nvlist(errlist, 2282 ZPOOL_TRIM_VDEVS); 2283 } 2284 2285 (void) snprintf(msg, sizeof (msg), 2286 dgettext(TEXT_DOMAIN, "operation failed")); 2287 } else { 2288 verify(vd_errlist != NULL); 2289 } 2290 2291 for (elem = nvlist_next_nvpair(vd_errlist, NULL); 2292 elem != NULL; elem = nvlist_next_nvpair(vd_errlist, elem)) { 2293 int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem)); 2294 char *path; 2295 /* 2296 * If only the pool was specified, and it was not a secure 2297 * trim then suppress warnings for individual vdevs which 2298 * do not support trimming. 2299 */ 2300 if (vd_error == EZFS_TRIM_NOTSUP && 2301 trim_flags->fullpool && 2302 !trim_flags->secure) { 2303 continue; 2304 } 2305 2306 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem), 2307 &path) != 0) 2308 path = nvpair_name(elem); 2309 2310 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error, 2311 "cannot trim '%s'", path); 2312 } 2313 2314 fnvlist_free(vdev_guids); 2315 fnvlist_free(guids_to_paths); 2316 2317 if (vd_errlist != NULL) { 2318 fnvlist_free(vd_errlist); 2319 return (-1); 2320 } 2321 2322 return (zpool_standard_error(zhp->zpool_hdl, err, msg)); 2323 } 2324 2325 /* 2326 * This provides a very minimal check whether a given string is likely a 2327 * c#t#d# style string. Users of this are expected to do their own 2328 * verification of the s# part. 2329 */ 2330 #define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 2331 2332 /* 2333 * More elaborate version for ones which may start with "/dev/dsk/" 2334 * and the like. 2335 */ 2336 static int 2337 ctd_check_path(char *str) 2338 { 2339 /* 2340 * If it starts with a slash, check the last component. 2341 */ 2342 if (str && str[0] == '/') { 2343 char *tmp = strrchr(str, '/'); 2344 2345 /* 2346 * If it ends in "/old", check the second-to-last 2347 * component of the string instead. 2348 */ 2349 if (tmp != str && strcmp(tmp, "/old") == 0) { 2350 for (tmp--; *tmp != '/'; tmp--) 2351 ; 2352 } 2353 str = tmp + 1; 2354 } 2355 return (CTD_CHECK(str)); 2356 } 2357 2358 /* 2359 * Find a vdev that matches the search criteria specified. We use the 2360 * nvpair name to determine how we should look for the device. 2361 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 2362 * spare; but FALSE if it's an INUSE spare.
2363 */ 2364 static nvlist_t * 2365 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare, 2366 boolean_t *l2cache, boolean_t *log) 2367 { 2368 uint_t c, children; 2369 nvlist_t **child; 2370 nvlist_t *ret; 2371 uint64_t is_log; 2372 char *srchkey; 2373 nvpair_t *pair = nvlist_next_nvpair(search, NULL); 2374 2375 /* Nothing to look for */ 2376 if (search == NULL || pair == NULL) 2377 return (NULL); 2378 2379 /* Obtain the key we will use to search */ 2380 srchkey = nvpair_name(pair); 2381 2382 switch (nvpair_type(pair)) { 2383 case DATA_TYPE_UINT64: 2384 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) { 2385 uint64_t srchval, theguid; 2386 2387 verify(nvpair_value_uint64(pair, &srchval) == 0); 2388 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 2389 &theguid) == 0); 2390 if (theguid == srchval) 2391 return (nv); 2392 } 2393 break; 2394 2395 case DATA_TYPE_STRING: { 2396 char *srchval, *val; 2397 2398 verify(nvpair_value_string(pair, &srchval) == 0); 2399 if (nvlist_lookup_string(nv, srchkey, &val) != 0) 2400 break; 2401 2402 /* 2403 * Search for the requested value. Special cases: 2404 * 2405 * - ZPOOL_CONFIG_PATH for whole disk entries. To support 2406 * UEFI boot, these end in "s0" or "s0/old" or "s1" or 2407 * "s1/old". The "s0" or "s1" part is hidden from the user, 2408 * but included in the string, so this matches around it. 2409 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE). 2410 * 2411 * Otherwise, all other searches are simple string compares. 2412 */ 2413 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && 2414 ctd_check_path(val)) { 2415 uint64_t wholedisk = 0; 2416 2417 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 2418 &wholedisk); 2419 if (wholedisk) { 2420 int slen = strlen(srchval); 2421 int vlen = strlen(val); 2422 2423 if (slen != vlen - 2) 2424 break; 2425 2426 /* 2427 * make_leaf_vdev() should only set 2428 * wholedisk for ZPOOL_CONFIG_PATHs which 2429 * will include "/dev/dsk/", giving plenty of 2430 * room for the indices used next. 2431 */ 2432 ASSERT(vlen >= 6); 2433 2434 /* 2435 * strings identical except trailing "s0" 2436 */ 2437 if ((strcmp(&val[vlen - 2], "s0") == 0 || 2438 strcmp(&val[vlen - 2], "s1") == 0) && 2439 strncmp(srchval, val, slen) == 0) 2440 return (nv); 2441 2442 /* 2443 * strings identical except trailing "s0/old" 2444 */ 2445 if ((strcmp(&val[vlen - 6], "s0/old") == 0 || 2446 strcmp(&val[vlen - 6], "s1/old") == 0) && 2447 strcmp(&srchval[slen - 4], "/old") == 0 && 2448 strncmp(srchval, val, slen - 4) == 0) 2449 return (nv); 2450 2451 break; 2452 } 2453 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 2454 char *type, *idx, *end, *p; 2455 uint64_t id, vdev_id; 2456 2457 /* 2458 * Determine our vdev type, keeping in mind 2459 * that the srchval is composed of a type and 2460 * vdev id pair (i.e. mirror-4). 2461 */ 2462 if ((type = strdup(srchval)) == NULL) 2463 return (NULL); 2464 2465 if ((p = strrchr(type, '-')) == NULL) { 2466 free(type); 2467 break; 2468 } 2469 idx = p + 1; 2470 *p = '\0'; 2471 2472 /* 2473 * If the types don't match then keep looking. 2474 */ 2475 if (strncmp(val, type, strlen(val)) != 0) { 2476 free(type); 2477 break; 2478 } 2479 2480 verify(zpool_vdev_is_interior(type)); 2481 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 2482 &id) == 0); 2483 2484 errno = 0; 2485 vdev_id = strtoull(idx, &end, 10); 2486 2487 free(type); 2488 if (errno != 0) 2489 return (NULL); 2490 2491 /* 2492 * Now verify that we have the correct vdev id. 
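 * (e.g. a search value of "mirror-4" only matches an interior vdev whose type is "mirror" and whose ZPOOL_CONFIG_ID is 4)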
2493 */ 2494 if (vdev_id == id) 2495 return (nv); 2496 } 2497 2498 /* 2499 * Common case 2500 */ 2501 if (strcmp(srchval, val) == 0) 2502 return (nv); 2503 break; 2504 } 2505 2506 default: 2507 break; 2508 } 2509 2510 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2511 &child, &children) != 0) 2512 return (NULL); 2513 2514 for (c = 0; c < children; c++) { 2515 if ((ret = vdev_to_nvlist_iter(child[c], search, 2516 avail_spare, l2cache, NULL)) != NULL) { 2517 /* 2518 * The 'is_log' value is only set for the toplevel 2519 * vdev, not the leaf vdevs. So we always lookup the 2520 * log device from the root of the vdev tree (where 2521 * 'log' is non-NULL). 2522 */ 2523 if (log != NULL && 2524 nvlist_lookup_uint64(child[c], 2525 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 2526 is_log) { 2527 *log = B_TRUE; 2528 } 2529 return (ret); 2530 } 2531 } 2532 2533 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2534 &child, &children) == 0) { 2535 for (c = 0; c < children; c++) { 2536 if ((ret = vdev_to_nvlist_iter(child[c], search, 2537 avail_spare, l2cache, NULL)) != NULL) { 2538 *avail_spare = B_TRUE; 2539 return (ret); 2540 } 2541 } 2542 } 2543 2544 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2545 &child, &children) == 0) { 2546 for (c = 0; c < children; c++) { 2547 if ((ret = vdev_to_nvlist_iter(child[c], search, 2548 avail_spare, l2cache, NULL)) != NULL) { 2549 *l2cache = B_TRUE; 2550 return (ret); 2551 } 2552 } 2553 } 2554 2555 return (NULL); 2556 } 2557 2558 /* 2559 * Given a physical path (minus the "/devices" prefix), find the 2560 * associated vdev. 2561 */ 2562 nvlist_t * 2563 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 2564 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 2565 { 2566 nvlist_t *search, *nvroot, *ret; 2567 2568 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2569 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 2570 2571 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2572 &nvroot) == 0); 2573 2574 *avail_spare = B_FALSE; 2575 *l2cache = B_FALSE; 2576 if (log != NULL) 2577 *log = B_FALSE; 2578 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2579 nvlist_free(search); 2580 2581 return (ret); 2582 } 2583 2584 /* 2585 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 
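 * Such vdevs are identified by a type-and-id name such as "mirror-0" or "raidz1-0" rather than by a device path.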
2586 */ 2587 static boolean_t 2588 zpool_vdev_is_interior(const char *name) 2589 { 2590 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2591 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 || 2592 strncmp(name, 2593 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 || 2594 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2595 return (B_TRUE); 2596 return (B_FALSE); 2597 } 2598 2599 nvlist_t * 2600 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2601 boolean_t *l2cache, boolean_t *log) 2602 { 2603 char buf[MAXPATHLEN]; 2604 char *end; 2605 nvlist_t *nvroot, *search, *ret; 2606 uint64_t guid; 2607 2608 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2609 2610 guid = strtoull(path, &end, 10); 2611 if (guid != 0 && *end == '\0') { 2612 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2613 } else if (zpool_vdev_is_interior(path)) { 2614 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2615 } else if (path[0] != '/') { 2616 (void) snprintf(buf, sizeof (buf), "%s/%s", ZFS_DISK_ROOT, 2617 path); 2618 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2619 } else { 2620 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2621 } 2622 2623 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2624 &nvroot) == 0); 2625 2626 *avail_spare = B_FALSE; 2627 *l2cache = B_FALSE; 2628 if (log != NULL) 2629 *log = B_FALSE; 2630 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2631 nvlist_free(search); 2632 2633 return (ret); 2634 } 2635 2636 static int 2637 vdev_is_online(nvlist_t *nv) 2638 { 2639 uint64_t ival; 2640 2641 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2642 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2643 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2644 return (0); 2645 2646 return (1); 2647 } 2648 2649 /* 2650 * Helper function for zpool_get_physpaths(). 2651 */ 2652 static int 2653 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2654 size_t *bytes_written) 2655 { 2656 size_t bytes_left, pos, rsz; 2657 char *tmppath; 2658 const char *format; 2659 2660 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2661 &tmppath) != 0) 2662 return (EZFS_NODEVICE); 2663 2664 pos = *bytes_written; 2665 bytes_left = physpath_size - pos; 2666 format = (pos == 0) ? "%s" : " %s"; 2667 2668 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2669 *bytes_written += rsz; 2670 2671 if (rsz >= bytes_left) { 2672 /* if physpath was not copied properly, clear it */ 2673 if (bytes_left != 0) { 2674 physpath[pos] = 0; 2675 } 2676 return (EZFS_NOSPC); 2677 } 2678 return (0); 2679 } 2680 2681 static int 2682 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2683 size_t *rsz, boolean_t is_spare) 2684 { 2685 char *type; 2686 int ret; 2687 2688 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2689 return (EZFS_INVALCONFIG); 2690 2691 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2692 /* 2693 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2694 * For a spare vdev, we only want to boot from the active 2695 * spare device. 
2696 */ 2697 if (is_spare) { 2698 uint64_t spare = 0; 2699 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2700 &spare); 2701 if (!spare) 2702 return (EZFS_INVALCONFIG); 2703 } 2704 2705 if (vdev_is_online(nv)) { 2706 if ((ret = vdev_get_one_physpath(nv, physpath, 2707 phypath_size, rsz)) != 0) 2708 return (ret); 2709 } 2710 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2711 strcmp(type, VDEV_TYPE_RAIDZ) == 0 || 2712 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2713 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2714 nvlist_t **child; 2715 uint_t count; 2716 int i, ret; 2717 2718 if (nvlist_lookup_nvlist_array(nv, 2719 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2720 return (EZFS_INVALCONFIG); 2721 2722 for (i = 0; i < count; i++) { 2723 ret = vdev_get_physpaths(child[i], physpath, 2724 phypath_size, rsz, is_spare); 2725 if (ret == EZFS_NOSPC) 2726 return (ret); 2727 } 2728 } 2729 2730 return (EZFS_POOL_INVALARG); 2731 } 2732 2733 /* 2734 * Get phys_path for a root pool config. 2735 * Return 0 on success; non-zero on failure. 2736 */ 2737 static int 2738 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2739 { 2740 size_t rsz; 2741 nvlist_t *vdev_root; 2742 nvlist_t **child; 2743 uint_t count; 2744 char *type; 2745 2746 rsz = 0; 2747 2748 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2749 &vdev_root) != 0) 2750 return (EZFS_INVALCONFIG); 2751 2752 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2753 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2754 &child, &count) != 0) 2755 return (EZFS_INVALCONFIG); 2756 2757 /* 2758 * root pool can only have a single top-level vdev. 2759 */ 2760 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1) 2761 return (EZFS_POOL_INVALARG); 2762 2763 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2764 B_FALSE); 2765 2766 /* No online devices */ 2767 if (rsz == 0) 2768 return (EZFS_NODEVICE); 2769 2770 return (0); 2771 } 2772 2773 /* 2774 * Get phys_path for a root pool. 2775 * Return 0 on success; non-zero on failure. 2776 */ 2777 int 2778 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2779 { 2780 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2781 phypath_size)); 2782 } 2783 2784 /* 2785 * If the device has been dynamically expanded then we need to relabel 2786 * the disk to use the new unallocated space. 2787 */ 2788 static int 2789 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2790 { 2791 char path[MAXPATHLEN]; 2792 char errbuf[1024]; 2793 int fd, error; 2794 int (*_efi_use_whole_disk)(int); 2795 2796 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2797 "efi_use_whole_disk")) == NULL) 2798 return (-1); 2799 2800 (void) snprintf(path, sizeof (path), "%s/%s", ZFS_RDISK_ROOT, name); 2801 2802 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2803 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2804 "relabel '%s': unable to open device"), name); 2805 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2806 } 2807 2808 /* 2809 * It's possible that we might encounter an error if the device 2810 * does not have any unallocated space left. If so, we simply 2811 * ignore that error and continue on.
2812 */ 2813 error = _efi_use_whole_disk(fd); 2814 (void) close(fd); 2815 if (error && error != VT_ENOSPC) { 2816 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2817 "relabel '%s': unable to read disk capacity"), name); 2818 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2819 } 2820 return (0); 2821 } 2822 2823 /* 2824 * Bring the specified vdev online. The 'flags' parameter is a set of the 2825 * ZFS_ONLINE_* flags. 2826 */ 2827 int 2828 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2829 vdev_state_t *newstate) 2830 { 2831 zfs_cmd_t zc = { 0 }; 2832 char msg[1024]; 2833 char *pathname; 2834 nvlist_t *tgt; 2835 boolean_t avail_spare, l2cache, islog; 2836 libzfs_handle_t *hdl = zhp->zpool_hdl; 2837 2838 if (flags & ZFS_ONLINE_EXPAND) { 2839 (void) snprintf(msg, sizeof (msg), 2840 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2841 } else { 2842 (void) snprintf(msg, sizeof (msg), 2843 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2844 } 2845 2846 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2847 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2848 &islog)) == NULL) 2849 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2850 2851 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2852 2853 if (avail_spare) 2854 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2855 2856 if ((flags & ZFS_ONLINE_EXPAND || 2857 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) && 2858 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) { 2859 uint64_t wholedisk = 0; 2860 2861 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2862 &wholedisk); 2863 2864 /* 2865 * XXX - L2ARC 1.0 devices can't support expansion. 2866 */ 2867 if (l2cache) { 2868 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2869 "cannot expand cache devices")); 2870 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2871 } 2872 2873 if (wholedisk) { 2874 pathname += strlen(ZFS_DISK_ROOT) + 1; 2875 (void) zpool_relabel_disk(hdl, pathname); 2876 } 2877 } 2878 2879 zc.zc_cookie = VDEV_STATE_ONLINE; 2880 zc.zc_obj = flags; 2881 2882 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2883 if (errno == EINVAL) { 2884 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2885 "from this pool into a new one. Use '%s' " 2886 "instead"), "zpool detach"); 2887 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2888 } 2889 return (zpool_standard_error(hdl, errno, msg)); 2890 } 2891 2892 *newstate = zc.zc_cookie; 2893 return (0); 2894 } 2895 2896 /* 2897 * Take the specified vdev offline 2898 */ 2899 int 2900 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2901 { 2902 zfs_cmd_t zc = { 0 }; 2903 char msg[1024]; 2904 nvlist_t *tgt; 2905 boolean_t avail_spare, l2cache; 2906 libzfs_handle_t *hdl = zhp->zpool_hdl; 2907 2908 (void) snprintf(msg, sizeof (msg), 2909 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2910 2911 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2912 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2913 NULL)) == NULL) 2914 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2915 2916 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2917 2918 if (avail_spare) 2919 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2920 2921 zc.zc_cookie = VDEV_STATE_OFFLINE; 2922 zc.zc_obj = istmp ? 
ZFS_OFFLINE_TEMPORARY : 0; 2923 2924 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2925 return (0); 2926 2927 switch (errno) { 2928 case EBUSY: 2929 2930 /* 2931 * There are no other replicas of this device. 2932 */ 2933 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2934 2935 case EEXIST: 2936 /* 2937 * The log device has unplayed logs 2938 */ 2939 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2940 2941 default: 2942 return (zpool_standard_error(hdl, errno, msg)); 2943 } 2944 } 2945 2946 /* 2947 * Mark the given vdev faulted. 2948 */ 2949 int 2950 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2951 { 2952 zfs_cmd_t zc = { 0 }; 2953 char msg[1024]; 2954 libzfs_handle_t *hdl = zhp->zpool_hdl; 2955 2956 (void) snprintf(msg, sizeof (msg), 2957 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2958 2959 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2960 zc.zc_guid = guid; 2961 zc.zc_cookie = VDEV_STATE_FAULTED; 2962 zc.zc_obj = aux; 2963 2964 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2965 return (0); 2966 2967 switch (errno) { 2968 case EBUSY: 2969 2970 /* 2971 * There are no other replicas of this device. 2972 */ 2973 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2974 2975 default: 2976 return (zpool_standard_error(hdl, errno, msg)); 2977 } 2978 2979 } 2980 2981 /* 2982 * Mark the given vdev degraded. 2983 */ 2984 int 2985 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2986 { 2987 zfs_cmd_t zc = { 0 }; 2988 char msg[1024]; 2989 libzfs_handle_t *hdl = zhp->zpool_hdl; 2990 2991 (void) snprintf(msg, sizeof (msg), 2992 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2993 2994 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2995 zc.zc_guid = guid; 2996 zc.zc_cookie = VDEV_STATE_DEGRADED; 2997 zc.zc_obj = aux; 2998 2999 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 3000 return (0); 3001 3002 return (zpool_standard_error(hdl, errno, msg)); 3003 } 3004 3005 /* 3006 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 3007 * a hot spare. 3008 */ 3009 static boolean_t 3010 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 3011 { 3012 nvlist_t **child; 3013 uint_t c, children; 3014 char *type; 3015 3016 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 3017 &children) == 0) { 3018 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 3019 &type) == 0); 3020 3021 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 3022 children == 2 && child[which] == tgt) 3023 return (B_TRUE); 3024 3025 for (c = 0; c < children; c++) 3026 if (is_replacing_spare(child[c], tgt, which)) 3027 return (B_TRUE); 3028 } 3029 3030 return (B_FALSE); 3031 } 3032 3033 /* 3034 * Attach new_disk (fully described by nvroot) to old_disk. 3035 * If 'replacing' is specified, the new disk will replace the old one. 
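 * 'nvroot' must describe exactly one new leaf vdev.  This is the common backend for both 'zpool attach' (replacing is zero) and 'zpool replace' (replacing is non-zero).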
3036 */ 3037 int 3038 zpool_vdev_attach(zpool_handle_t *zhp, 3039 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 3040 { 3041 zfs_cmd_t zc = { 0 }; 3042 char msg[1024]; 3043 int ret; 3044 nvlist_t *tgt, *newvd; 3045 boolean_t avail_spare, l2cache, islog; 3046 uint64_t val; 3047 char *newname; 3048 nvlist_t **child; 3049 uint_t children; 3050 nvlist_t *config_root; 3051 libzfs_handle_t *hdl = zhp->zpool_hdl; 3052 boolean_t rootpool = zpool_is_bootable(zhp); 3053 3054 if (replacing) 3055 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 3056 "cannot replace %s with %s"), old_disk, new_disk); 3057 else 3058 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 3059 "cannot attach %s to %s"), new_disk, old_disk); 3060 3061 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3062 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 3063 &islog)) == NULL) 3064 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3065 3066 if (avail_spare) 3067 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3068 3069 if (l2cache) 3070 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 3071 3072 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3073 zc.zc_cookie = replacing; 3074 3075 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3076 &child, &children) != 0 || children != 1) { 3077 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3078 "new device must be a single disk")); 3079 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 3080 } 3081 3082 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 3083 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 3084 3085 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL) 3086 return (-1); 3087 3088 newvd = zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, NULL); 3089 /* 3090 * If the target is a hot spare that has been swapped in, we can only 3091 * replace it with another hot spare. 3092 */ 3093 if (replacing && 3094 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 3095 (newvd == NULL || !avail_spare) && 3096 is_replacing_spare(config_root, tgt, 1)) { 3097 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3098 "can only be replaced by another hot spare")); 3099 free(newname); 3100 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 3101 } 3102 3103 free(newname); 3104 3105 if (replacing && avail_spare && !vdev_is_online(newvd)) { 3106 (void) zpool_standard_error(hdl, ENXIO, msg); 3107 return (-1); 3108 } 3109 3110 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 3111 return (-1); 3112 3113 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 3114 3115 zcmd_free_nvlists(&zc); 3116 3117 if (ret == 0) { 3118 if (rootpool) { 3119 /* 3120 * XXX need a better way to prevent user from 3121 * booting up a half-baked vdev. 3122 */ 3123 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 3124 "sure to wait until resilver is done " 3125 "before rebooting.\n")); 3126 } 3127 return (0); 3128 } 3129 3130 switch (errno) { 3131 case ENOTSUP: 3132 /* 3133 * Can't attach to or replace this type of vdev. 
3134 */ 3135 if (replacing) { 3136 uint64_t version = zpool_get_prop_int(zhp, 3137 ZPOOL_PROP_VERSION, NULL); 3138 3139 if (islog) 3140 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3141 "cannot replace a log with a spare")); 3142 else if (version >= SPA_VERSION_MULTI_REPLACE) 3143 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3144 "already in replacing/spare config; wait " 3145 "for completion or use 'zpool detach'")); 3146 else 3147 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3148 "cannot replace a replacing device")); 3149 } else { 3150 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3151 "can only attach to mirrors and top-level " 3152 "disks")); 3153 } 3154 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 3155 break; 3156 3157 case EINVAL: 3158 /* 3159 * The new device must be a single disk. 3160 */ 3161 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3162 "new device must be a single disk")); 3163 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 3164 break; 3165 3166 case EBUSY: 3167 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, " 3168 "or device removal is in progress"), 3169 new_disk); 3170 (void) zfs_error(hdl, EZFS_BADDEV, msg); 3171 break; 3172 3173 case EOVERFLOW: 3174 /* 3175 * The new device is too small. 3176 */ 3177 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3178 "device is too small")); 3179 (void) zfs_error(hdl, EZFS_BADDEV, msg); 3180 break; 3181 3182 case EDOM: 3183 /* 3184 * The new device has a different optimal sector size. 3185 */ 3186 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3187 "new device has a different optimal sector size; use the " 3188 "option '-o ashift=N' to override the optimal size")); 3189 (void) zfs_error(hdl, EZFS_BADDEV, msg); 3190 break; 3191 3192 case ENAMETOOLONG: 3193 /* 3194 * The resulting top-level vdev spec won't fit in the label. 3195 */ 3196 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 3197 break; 3198 3199 default: 3200 (void) zpool_standard_error(hdl, errno, msg); 3201 } 3202 3203 return (-1); 3204 } 3205 3206 /* 3207 * Detach the specified device. 3208 */ 3209 int 3210 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 3211 { 3212 zfs_cmd_t zc = { 0 }; 3213 char msg[1024]; 3214 nvlist_t *tgt; 3215 boolean_t avail_spare, l2cache; 3216 libzfs_handle_t *hdl = zhp->zpool_hdl; 3217 3218 (void) snprintf(msg, sizeof (msg), 3219 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 3220 3221 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3222 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3223 NULL)) == NULL) 3224 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3225 3226 if (avail_spare) 3227 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3228 3229 if (l2cache) 3230 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 3231 3232 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3233 3234 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 3235 return (0); 3236 3237 switch (errno) { 3238 3239 case ENOTSUP: 3240 /* 3241 * Can't detach from this type of vdev. 3242 */ 3243 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 3244 "applicable to mirror and replacing vdevs")); 3245 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 3246 break; 3247 3248 case EBUSY: 3249 /* 3250 * There are no other replicas of this device. 3251 */ 3252 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 3253 break; 3254 3255 default: 3256 (void) zpool_standard_error(hdl, errno, msg); 3257 } 3258 3259 return (-1); 3260 } 3261 3262 /* 3263 * Find a mirror vdev in the source nvlist. 
3264 * 3265 * The mchild array contains a list of disks in one of the top-level mirrors 3266 * of the source pool. The schild array contains a list of disks that the 3267 * user specified on the command line. We loop over the mchild array to 3268 * see if any entry in the schild array matches. 3269 * 3270 * If a disk in the mchild array is found in the schild array, we return 3271 * the index of that entry. Otherwise we return -1. 3272 */ 3273 static int 3274 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 3275 nvlist_t **schild, uint_t schildren) 3276 { 3277 uint_t mc; 3278 3279 for (mc = 0; mc < mchildren; mc++) { 3280 uint_t sc; 3281 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 3282 mchild[mc], 0); 3283 3284 for (sc = 0; sc < schildren; sc++) { 3285 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 3286 schild[sc], 0); 3287 boolean_t result = (strcmp(mpath, spath) == 0); 3288 3289 free(spath); 3290 if (result) { 3291 free(mpath); 3292 return (mc); 3293 } 3294 } 3295 3296 free(mpath); 3297 } 3298 3299 return (-1); 3300 } 3301 3302 /* 3303 * Split a mirror pool. If newroot points to null, then a new nvlist 3304 * is generated and it is the responsibility of the caller to free it. 3305 */ 3306 int 3307 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 3308 nvlist_t *props, splitflags_t flags) 3309 { 3310 zfs_cmd_t zc = { 0 }; 3311 char msg[1024]; 3312 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 3313 nvlist_t **varray = NULL, *zc_props = NULL; 3314 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 3315 libzfs_handle_t *hdl = zhp->zpool_hdl; 3316 uint64_t vers; 3317 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 3318 int retval = 0; 3319 3320 (void) snprintf(msg, sizeof (msg), 3321 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 3322 3323 if (!zpool_name_valid(hdl, B_FALSE, newname)) 3324 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 3325 3326 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 3327 (void) fprintf(stderr, gettext("Internal error: unable to " 3328 "retrieve pool configuration\n")); 3329 return (-1); 3330 } 3331 3332 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 3333 == 0); 3334 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 3335 3336 if (props) { 3337 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 3338 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 3339 props, vers, flags, msg)) == NULL) 3340 return (-1); 3341 } 3342 3343 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 3344 &children) != 0) { 3345 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3346 "Source pool is missing vdev tree")); 3347 nvlist_free(zc_props); 3348 return (-1); 3349 } 3350 3351 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 3352 vcount = 0; 3353 3354 if (*newroot == NULL || 3355 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 3356 &newchild, &newchildren) != 0) 3357 newchildren = 0; 3358 3359 for (c = 0; c < children; c++) { 3360 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 3361 char *type; 3362 nvlist_t **mchild, *vdev; 3363 uint_t mchildren; 3364 int entry; 3365 3366 /* 3367 * Unlike cache & spares, slogs are stored in the 3368 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 
3369 */ 3370 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 3371 &is_log); 3372 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 3373 &is_hole); 3374 if (is_log || is_hole) { 3375 /* 3376 * Create a hole vdev and put it in the config. 3377 */ 3378 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 3379 goto out; 3380 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 3381 VDEV_TYPE_HOLE) != 0) 3382 goto out; 3383 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 3384 1) != 0) 3385 goto out; 3386 if (lastlog == 0) 3387 lastlog = vcount; 3388 varray[vcount++] = vdev; 3389 continue; 3390 } 3391 lastlog = 0; 3392 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 3393 == 0); 3394 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 3395 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3396 "Source pool must be composed only of mirrors\n")); 3397 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3398 goto out; 3399 } 3400 3401 verify(nvlist_lookup_nvlist_array(child[c], 3402 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 3403 3404 /* find or add an entry for this top-level vdev */ 3405 if (newchildren > 0 && 3406 (entry = find_vdev_entry(zhp, mchild, mchildren, 3407 newchild, newchildren)) >= 0) { 3408 /* We found a disk that the user specified. */ 3409 vdev = mchild[entry]; 3410 ++found; 3411 } else { 3412 /* User didn't specify a disk for this vdev. */ 3413 vdev = mchild[mchildren - 1]; 3414 } 3415 3416 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 3417 goto out; 3418 } 3419 3420 /* did we find every disk the user specified? */ 3421 if (found != newchildren) { 3422 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 3423 "include at most one disk from each mirror")); 3424 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3425 goto out; 3426 } 3427 3428 /* Prepare the nvlist for populating. */ 3429 if (*newroot == NULL) { 3430 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 3431 goto out; 3432 freelist = B_TRUE; 3433 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 3434 VDEV_TYPE_ROOT) != 0) 3435 goto out; 3436 } else { 3437 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 3438 } 3439 3440 /* Add all the children we found */ 3441 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 3442 lastlog == 0 ? vcount : lastlog) != 0) 3443 goto out; 3444 3445 /* 3446 * If we're just doing a dry run, exit now with success. 3447 */ 3448 if (flags.dryrun) { 3449 memory_err = B_FALSE; 3450 freelist = B_FALSE; 3451 goto out; 3452 } 3453 3454 /* now build up the config list & call the ioctl */ 3455 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3456 goto out; 3457 3458 if (nvlist_add_nvlist(newconfig, 3459 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3460 nvlist_add_string(newconfig, 3461 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3462 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3463 goto out; 3464 3465 /* 3466 * The new pool is automatically part of the namespace unless we 3467 * explicitly export it. 
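 * (when flags.import is not set, ZPOOL_EXPORT_AFTER_SPLIT below asks the kernel to export the new pool as part of the split)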
3468 */ 3469 if (!flags.import) 3470 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3471 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3472 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3473 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3474 goto out; 3475 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3476 goto out; 3477 3478 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3479 retval = zpool_standard_error(hdl, errno, msg); 3480 goto out; 3481 } 3482 3483 freelist = B_FALSE; 3484 memory_err = B_FALSE; 3485 3486 out: 3487 if (varray != NULL) { 3488 int v; 3489 3490 for (v = 0; v < vcount; v++) 3491 nvlist_free(varray[v]); 3492 free(varray); 3493 } 3494 zcmd_free_nvlists(&zc); 3495 nvlist_free(zc_props); 3496 nvlist_free(newconfig); 3497 if (freelist) { 3498 nvlist_free(*newroot); 3499 *newroot = NULL; 3500 } 3501 3502 if (retval != 0) 3503 return (retval); 3504 3505 if (memory_err) 3506 return (no_memory(hdl)); 3507 3508 return (0); 3509 } 3510 3511 /* 3512 * Remove the given device. 3513 */ 3514 int 3515 zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3516 { 3517 zfs_cmd_t zc = { 0 }; 3518 char msg[1024]; 3519 nvlist_t *tgt; 3520 boolean_t avail_spare, l2cache, islog; 3521 libzfs_handle_t *hdl = zhp->zpool_hdl; 3522 uint64_t version; 3523 3524 (void) snprintf(msg, sizeof (msg), 3525 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3526 3527 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3528 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3529 &islog)) == NULL) 3530 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3531 3532 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3533 if (islog && version < SPA_VERSION_HOLES) { 3534 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3535 "pool must be upgraded to support log removal")); 3536 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3537 } 3538 3539 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 3540 3541 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3542 return (0); 3543 3544 switch (errno) { 3545 3546 case EINVAL: 3547 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3548 "invalid config; all top-level vdevs must " 3549 "have the same sector size and not be raidz.")); 3550 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 3551 break; 3552 3553 case EBUSY: 3554 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3555 "Pool busy; removal may already be in progress")); 3556 (void) zfs_error(hdl, EZFS_BUSY, msg); 3557 break; 3558 3559 default: 3560 (void) zpool_standard_error(hdl, errno, msg); 3561 } 3562 return (-1); 3563 } 3564 3565 int 3566 zpool_vdev_remove_cancel(zpool_handle_t *zhp) 3567 { 3568 zfs_cmd_t zc = { 0 }; 3569 char msg[1024]; 3570 libzfs_handle_t *hdl = zhp->zpool_hdl; 3571 3572 (void) snprintf(msg, sizeof (msg), 3573 dgettext(TEXT_DOMAIN, "cannot cancel removal")); 3574 3575 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3576 zc.zc_cookie = 1; 3577 3578 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3579 return (0); 3580 3581 return (zpool_standard_error(hdl, errno, msg)); 3582 } 3583 3584 int 3585 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path, 3586 uint64_t *sizep) 3587 { 3588 char msg[1024]; 3589 nvlist_t *tgt; 3590 boolean_t avail_spare, l2cache, islog; 3591 libzfs_handle_t *hdl = zhp->zpool_hdl; 3592 3593 (void) snprintf(msg, sizeof (msg), 3594 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"), 3595 path); 3596 3597 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 
&l2cache, 3598 &islog)) == NULL) 3599 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3600 3601 if (avail_spare || l2cache || islog) { 3602 *sizep = 0; 3603 return (0); 3604 } 3605 3606 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) { 3607 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3608 "indirect size not available")); 3609 return (zfs_error(hdl, EINVAL, msg)); 3610 } 3611 return (0); 3612 } 3613 3614 /* 3615 * Clear the errors for the pool, or the particular device if specified. 3616 */ 3617 int 3618 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3619 { 3620 zfs_cmd_t zc = { 0 }; 3621 char msg[1024]; 3622 nvlist_t *tgt; 3623 zpool_load_policy_t policy; 3624 boolean_t avail_spare, l2cache; 3625 libzfs_handle_t *hdl = zhp->zpool_hdl; 3626 nvlist_t *nvi = NULL; 3627 int error; 3628 3629 if (path) 3630 (void) snprintf(msg, sizeof (msg), 3631 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3632 path); 3633 else 3634 (void) snprintf(msg, sizeof (msg), 3635 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3636 zhp->zpool_name); 3637 3638 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3639 if (path) { 3640 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3641 &l2cache, NULL)) == NULL) 3642 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3643 3644 /* 3645 * Don't allow error clearing for hot spares. Do allow 3646 * error clearing for l2cache devices. 3647 */ 3648 if (avail_spare) 3649 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3650 3651 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3652 &zc.zc_guid) == 0); 3653 } 3654 3655 zpool_get_load_policy(rewindnvl, &policy); 3656 zc.zc_cookie = policy.zlp_rewind; 3657 3658 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3659 return (-1); 3660 3661 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 3662 return (-1); 3663 3664 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3665 errno == ENOMEM) { 3666 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3667 zcmd_free_nvlists(&zc); 3668 return (-1); 3669 } 3670 } 3671 3672 if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) && 3673 errno != EPERM && errno != EACCES)) { 3674 if (policy.zlp_rewind & 3675 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3676 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3677 zpool_rewind_exclaim(hdl, zc.zc_name, 3678 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), 3679 nvi); 3680 nvlist_free(nvi); 3681 } 3682 zcmd_free_nvlists(&zc); 3683 return (0); 3684 } 3685 3686 zcmd_free_nvlists(&zc); 3687 return (zpool_standard_error(hdl, errno, msg)); 3688 } 3689 3690 /* 3691 * Similar to zpool_clear(), but takes a GUID (used by fmd). 3692 */ 3693 int 3694 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3695 { 3696 zfs_cmd_t zc = { 0 }; 3697 char msg[1024]; 3698 libzfs_handle_t *hdl = zhp->zpool_hdl; 3699 3700 (void) snprintf(msg, sizeof (msg), 3701 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3702 guid); 3703 3704 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3705 zc.zc_guid = guid; 3706 zc.zc_cookie = ZPOOL_NO_REWIND; 3707 3708 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3709 return (0); 3710 3711 return (zpool_standard_error(hdl, errno, msg)); 3712 } 3713 3714 /* 3715 * Change the GUID for a pool. 
3716 */ 3717 int 3718 zpool_reguid(zpool_handle_t *zhp) 3719 { 3720 char msg[1024]; 3721 libzfs_handle_t *hdl = zhp->zpool_hdl; 3722 zfs_cmd_t zc = { 0 }; 3723 3724 (void) snprintf(msg, sizeof (msg), 3725 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3726 3727 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3728 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3729 return (0); 3730 3731 return (zpool_standard_error(hdl, errno, msg)); 3732 } 3733 3734 /* 3735 * Reopen the pool. 3736 */ 3737 int 3738 zpool_reopen(zpool_handle_t *zhp) 3739 { 3740 zfs_cmd_t zc = { 0 }; 3741 char msg[1024]; 3742 libzfs_handle_t *hdl = zhp->zpool_hdl; 3743 3744 (void) snprintf(msg, sizeof (msg), 3745 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3746 zhp->zpool_name); 3747 3748 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3749 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3750 return (0); 3751 return (zpool_standard_error(hdl, errno, msg)); 3752 } 3753 3754 /* call into libzfs_core to execute the sync IOCTL per pool */ 3755 int 3756 zpool_sync_one(zpool_handle_t *zhp, void *data) 3757 { 3758 int ret; 3759 libzfs_handle_t *hdl = zpool_get_handle(zhp); 3760 const char *pool_name = zpool_get_name(zhp); 3761 boolean_t *force = data; 3762 nvlist_t *innvl = fnvlist_alloc(); 3763 3764 fnvlist_add_boolean_value(innvl, "force", *force); 3765 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) { 3766 nvlist_free(innvl); 3767 return (zpool_standard_error_fmt(hdl, ret, 3768 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name)); 3769 } 3770 nvlist_free(innvl); 3771 3772 return (0); 3773 } 3774 3775 /* 3776 * Convert from a devid string to a path. 3777 */ 3778 static char * 3779 devid_to_path(char *devid_str) 3780 { 3781 ddi_devid_t devid; 3782 char *minor; 3783 char *path; 3784 devid_nmlist_t *list = NULL; 3785 int ret; 3786 3787 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3788 return (NULL); 3789 3790 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3791 3792 devid_str_free(minor); 3793 devid_free(devid); 3794 3795 if (ret != 0) 3796 return (NULL); 3797 3798 /* 3799 * In a case the strdup() fails, we will just return NULL below. 3800 */ 3801 path = strdup(list[0].devname); 3802 3803 devid_free_nmlist(list); 3804 3805 return (path); 3806 } 3807 3808 /* 3809 * Convert from a path to a devid string. 3810 */ 3811 static char * 3812 path_to_devid(const char *path) 3813 { 3814 int fd; 3815 ddi_devid_t devid; 3816 char *minor, *ret; 3817 3818 if ((fd = open(path, O_RDONLY)) < 0) 3819 return (NULL); 3820 3821 minor = NULL; 3822 ret = NULL; 3823 if (devid_get(fd, &devid) == 0) { 3824 if (devid_get_minor_name(fd, &minor) == 0) 3825 ret = devid_str_encode(devid, minor); 3826 if (minor != NULL) 3827 devid_str_free(minor); 3828 devid_free(devid); 3829 } 3830 (void) close(fd); 3831 3832 return (ret); 3833 } 3834 3835 struct path_from_physpath_walker_args { 3836 char *pfpwa_path; 3837 }; 3838 3839 /* 3840 * Walker for use with di_devlink_walk(). Stores the "/dev" path of the first 3841 * primary devlink (i.e., the first devlink which refers to our "/devices" 3842 * node) and stops walking. 
3843 */ 3844 static int 3845 path_from_physpath_walker(di_devlink_t devlink, void *arg) 3846 { 3847 struct path_from_physpath_walker_args *pfpwa = arg; 3848 3849 if (di_devlink_type(devlink) != DI_PRIMARY_LINK) { 3850 return (DI_WALK_CONTINUE); 3851 } 3852 3853 verify(pfpwa->pfpwa_path == NULL); 3854 if ((pfpwa->pfpwa_path = strdup(di_devlink_path(devlink))) != NULL) { 3855 return (DI_WALK_TERMINATE); 3856 } 3857 3858 return (DI_WALK_CONTINUE); 3859 } 3860 3861 /* 3862 * Search for a "/dev" path that refers to our physical path. Returns the new 3863 * path if one is found and it does not match the existing "path" value. If 3864 * the value is unchanged, or one could not be found, returns NULL. 3865 */ 3866 static char * 3867 path_from_physpath(libzfs_handle_t *hdl, const char *path, 3868 const char *physpath) 3869 { 3870 struct path_from_physpath_walker_args pfpwa; 3871 3872 if (physpath == NULL) { 3873 return (NULL); 3874 } 3875 3876 if (hdl->libzfs_devlink == NULL) { 3877 if ((hdl->libzfs_devlink = di_devlink_init(NULL, 0)) == 3878 DI_LINK_NIL) { 3879 /* 3880 * We may not be able to open a handle if this process 3881 * is insufficiently privileged, or we are too early in 3882 * boot for devfsadm to be ready. Ignore this error 3883 * and defer the path check to a subsequent run. 3884 */ 3885 return (NULL); 3886 } 3887 } 3888 3889 pfpwa.pfpwa_path = NULL; 3890 (void) di_devlink_walk(hdl->libzfs_devlink, NULL, physpath, 3891 DI_PRIMARY_LINK, &pfpwa, path_from_physpath_walker); 3892 3893 if (path != NULL && pfpwa.pfpwa_path != NULL && 3894 strcmp(path, pfpwa.pfpwa_path) == 0) { 3895 /* 3896 * If the path is already correct, no change is required. 3897 */ 3898 free(pfpwa.pfpwa_path); 3899 return (NULL); 3900 } 3901 3902 return (pfpwa.pfpwa_path); 3903 } 3904 3905 /* 3906 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3907 * ignore any failure here, since a common case is for an unprivileged user to 3908 * type 'zpool status', and we'll display the correct information anyway. 3909 */ 3910 static void 3911 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3912 { 3913 zfs_cmd_t zc = { 0 }; 3914 3915 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3916 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3917 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3918 &zc.zc_guid) == 0); 3919 3920 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3921 } 3922 3923 /* 3924 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3925 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3926 * We also check if this is a whole disk, in which case we strip off the 3927 * trailing 's0' slice name. 3928 * 3929 * This routine is also responsible for identifying when disks have been 3930 * reconfigured in a new location. The kernel will have opened the device by 3931 * devid, but the path will still refer to the old location. To catch this, we 3932 * first do a path -> devid translation (which is fast for the common case). If 3933 * the devid matches, we're done. If not, we do a reverse devid -> path 3934 * translation and issue the appropriate ioctl() to update the path of the vdev. 3935 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3936 * of these checks. 
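 * For example, a whole-disk vdev whose stored path is "/dev/dsk/c0t0d0s0" is normally displayed simply as "c0t0d0".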
3937 */ 3938 char * 3939 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3940 int name_flags) 3941 { 3942 char *path, *type, *env; 3943 uint64_t value; 3944 char buf[64]; 3945 3946 /* 3947 * vdev_name will be "root"/"root-0" for the root vdev, but it is the 3948 * zpool name that will be displayed to the user. 3949 */ 3950 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 3951 if (zhp != NULL && strcmp(type, "root") == 0) 3952 return (zfs_strdup(hdl, zpool_get_name(zhp))); 3953 3954 env = getenv("ZPOOL_VDEV_NAME_PATH"); 3955 if (env && (strtoul(env, NULL, 0) > 0 || 3956 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) 3957 name_flags |= VDEV_NAME_PATH; 3958 3959 env = getenv("ZPOOL_VDEV_NAME_GUID"); 3960 if (env && (strtoul(env, NULL, 0) > 0 || 3961 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) 3962 name_flags |= VDEV_NAME_GUID; 3963 3964 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS"); 3965 if (env && (strtoul(env, NULL, 0) > 0 || 3966 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) 3967 name_flags |= VDEV_NAME_FOLLOW_LINKS; 3968 3969 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 || 3970 name_flags & VDEV_NAME_GUID) { 3971 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value); 3972 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value); 3973 path = buf; 3974 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 3975 vdev_stat_t *vs; 3976 uint_t vsc; 3977 char *newpath = NULL; 3978 char *physpath = NULL; 3979 char *devid = NULL; 3980 3981 /* 3982 * If the device is dead (faulted, offline, etc) then don't 3983 * bother opening it. Otherwise we may be forcing the user to 3984 * open a misbehaving device, which can have undesirable 3985 * effects. 3986 */ 3987 if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3988 (uint64_t **)&vs, &vsc) != 0 || 3989 vs->vs_state < VDEV_STATE_DEGRADED || 3990 zhp == NULL) { 3991 goto after_open; 3992 } 3993 3994 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3995 /* 3996 * This vdev has a devid. We can use it to check the 3997 * current path. 3998 */ 3999 char *newdevid = path_to_devid(path); 4000 4001 if (newdevid == NULL || strcmp(devid, newdevid) != 0) { 4002 newpath = devid_to_path(devid); 4003 } 4004 4005 if (newdevid != NULL) 4006 devid_str_free(newdevid); 4007 4008 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, 4009 &physpath) == 0) { 4010 /* 4011 * This vdev does not have a devid, but it does have a 4012 * physical path. Attempt to translate this to a /dev 4013 * path. 4014 */ 4015 newpath = path_from_physpath(hdl, path, physpath); 4016 } 4017 4018 if (newpath != NULL) { 4019 /* 4020 * Update the path appropriately. 4021 */ 4022 set_path(zhp, nv, newpath); 4023 if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, 4024 newpath) == 0) { 4025 verify(nvlist_lookup_string(nv, 4026 ZPOOL_CONFIG_PATH, &path) == 0); 4027 } 4028 free(newpath); 4029 } 4030 4031 if (name_flags & VDEV_NAME_FOLLOW_LINKS) { 4032 char *rp = realpath(path, NULL); 4033 if (rp) { 4034 strlcpy(buf, rp, sizeof (buf)); 4035 path = buf; 4036 free(rp); 4037 } 4038 } 4039 4040 after_open: 4041 if (strncmp(path, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0) 4042 path += strlen(ZFS_DISK_ROOTD); 4043 4044 /* 4045 * Remove the partition from the path if this is a whole disk.
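 * (e.g. "c0t1d0s0" becomes "c0t1d0", and "c0t1d0s0/old" becomes "c0t1d0/old")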
4046 */ 4047 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value) 4048 == 0 && value && !(name_flags & VDEV_NAME_PATH)) { 4049 int pathlen = strlen(path); 4050 char *tmp = zfs_strdup(hdl, path); 4051 4052 /* 4053 * If it starts with c#, and ends with "s0" or "s1", 4054 * chop the slice off, or if it ends with "s0/old" or 4055 * "s1/old", remove the slice from the middle. 4056 */ 4057 if (CTD_CHECK(tmp)) { 4058 if (strcmp(&tmp[pathlen - 2], "s0") == 0 || 4059 strcmp(&tmp[pathlen - 2], "s1") == 0) { 4060 tmp[pathlen - 2] = '\0'; 4061 } else if (pathlen > 6 && 4062 (strcmp(&tmp[pathlen - 6], "s0/old") == 0 || 4063 strcmp(&tmp[pathlen - 6], "s1/old") == 0)) { 4064 (void) strcpy(&tmp[pathlen - 6], 4065 "/old"); 4066 } 4067 } 4068 return (tmp); 4069 } 4070 } else { 4071 path = type; 4072 4073 /* 4074 * If it's a raidz device, we need to stick in the parity level. 4075 */ 4076 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 4077 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 4078 &value) == 0); 4079 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 4080 (u_longlong_t)value); 4081 path = buf; 4082 } 4083 4084 /* 4085 * We identify each top-level vdev by using a <type-id> 4086 * naming convention. 4087 */ 4088 if (name_flags & VDEV_NAME_TYPE_ID) { 4089 uint64_t id; 4090 4091 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 4092 &id) == 0); 4093 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 4094 (u_longlong_t)id); 4095 path = buf; 4096 } 4097 } 4098 4099 return (zfs_strdup(hdl, path)); 4100 } 4101 4102 static int 4103 zbookmark_mem_compare(const void *a, const void *b) 4104 { 4105 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 4106 } 4107 4108 /* 4109 * Retrieve the persistent error log, uniquify the members, and return to the 4110 * caller. 4111 */ 4112 int 4113 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 4114 { 4115 zfs_cmd_t zc = { 0 }; 4116 uint64_t count; 4117 zbookmark_phys_t *zb = NULL; 4118 int i; 4119 4120 /* 4121 * Retrieve the raw error list from the kernel. If the number of errors 4122 * has increased, allocate more space and continue until we get the 4123 * entire list. 4124 */ 4125 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 4126 &count) == 0); 4127 if (count == 0) 4128 return (0); 4129 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 4130 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL) 4131 return (-1); 4132 zc.zc_nvlist_dst_size = count; 4133 (void) strcpy(zc.zc_name, zhp->zpool_name); 4134 for (;;) { 4135 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 4136 &zc) != 0) { 4137 free((void *)(uintptr_t)zc.zc_nvlist_dst); 4138 if (errno == ENOMEM) { 4139 void *dst; 4140 4141 count = zc.zc_nvlist_dst_size; 4142 dst = zfs_alloc(zhp->zpool_hdl, count * 4143 sizeof (zbookmark_phys_t)); 4144 if (dst == NULL) 4145 return (-1); 4146 zc.zc_nvlist_dst = (uintptr_t)dst; 4147 } else { 4148 return (-1); 4149 } 4150 } else { 4151 break; 4152 } 4153 } 4154 4155 /* 4156 * Sort the resulting bookmarks. This is a little confusing due to the 4157 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last 4158 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks 4159 * _not_ copied as part of the process. So we point the start of our 4160 * array appropriately and decrement the total number of elements.
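 * For example, if there was room for 100 bookmarks and 'zc_nvlist_dst_size' comes back as 20, then entries 20 through 99 of the buffer hold the 80 bookmarks that were actually copied.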
4161 */ 4162 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) + 4163 zc.zc_nvlist_dst_size; 4164 count -= zc.zc_nvlist_dst_size; 4165 4166 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare); 4167 4168 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 4169 4170 /* 4171 * Fill in the nverrlistp with nvlist's of dataset and object numbers. 4172 */ 4173 for (i = 0; i < count; i++) { 4174 nvlist_t *nv; 4175 4176 /* ignoring zb_blkid and zb_level for now */ 4177 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 4178 zb[i-1].zb_object == zb[i].zb_object) 4179 continue; 4180 4181 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 4182 goto nomem; 4183 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 4184 zb[i].zb_objset) != 0) { 4185 nvlist_free(nv); 4186 goto nomem; 4187 } 4188 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 4189 zb[i].zb_object) != 0) { 4190 nvlist_free(nv); 4191 goto nomem; 4192 } 4193 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 4194 nvlist_free(nv); 4195 goto nomem; 4196 } 4197 nvlist_free(nv); 4198 } 4199 4200 free((void *)(uintptr_t)zc.zc_nvlist_dst); 4201 return (0); 4202 4203 nomem: 4204 free((void *)(uintptr_t)zc.zc_nvlist_dst); 4205 return (no_memory(zhp->zpool_hdl)); 4206 } 4207 4208 /* 4209 * Upgrade a ZFS pool to the latest on-disk version. 4210 */ 4211 int 4212 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 4213 { 4214 zfs_cmd_t zc = { 0 }; 4215 libzfs_handle_t *hdl = zhp->zpool_hdl; 4216 4217 (void) strcpy(zc.zc_name, zhp->zpool_name); 4218 zc.zc_cookie = new_version; 4219 4220 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 4221 return (zpool_standard_error_fmt(hdl, errno, 4222 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 4223 zhp->zpool_name)); 4224 return (0); 4225 } 4226 4227 void 4228 zfs_save_arguments(int argc, char **argv, char *string, int len) 4229 { 4230 (void) strlcpy(string, basename(argv[0]), len); 4231 for (int i = 1; i < argc; i++) { 4232 (void) strlcat(string, " ", len); 4233 (void) strlcat(string, argv[i], len); 4234 } 4235 } 4236 4237 int 4238 zpool_log_history(libzfs_handle_t *hdl, const char *message) 4239 { 4240 zfs_cmd_t zc = { 0 }; 4241 nvlist_t *args; 4242 int err; 4243 4244 args = fnvlist_alloc(); 4245 fnvlist_add_string(args, "message", message); 4246 err = zcmd_write_src_nvlist(hdl, &zc, args); 4247 if (err == 0) 4248 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc); 4249 nvlist_free(args); 4250 zcmd_free_nvlists(&zc); 4251 return (err); 4252 } 4253 4254 /* 4255 * Perform ioctl to get some command history of a pool. 4256 * 4257 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 4258 * logical offset of the history buffer to start reading from. 4259 * 4260 * Upon return, 'off' is the next logical offset to read from and 4261 * 'len' is the actual amount of bytes read into 'buf'. 
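 *
 * A caller typically invokes this in a loop, feeding the updated offset
 * back in until no more bytes are returned; as an illustrative sketch:
 *
 *     uint64_t off = 0, len;
 *     do {
 *             len = buflen;
 *             if (get_history(zhp, buf, &off, &len) != 0)
 *                     break;
 *             (process 'len' bytes of packed history records in 'buf')
 *     } while (len != 0);
 *
 * zpool_get_history() below follows this pattern, unpacking the records
 * with zpool_history_unpack().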
4262 */ 4263 static int 4264 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 4265 { 4266 zfs_cmd_t zc = { 0 }; 4267 libzfs_handle_t *hdl = zhp->zpool_hdl; 4268 4269 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4270 4271 zc.zc_history = (uint64_t)(uintptr_t)buf; 4272 zc.zc_history_len = *len; 4273 zc.zc_history_offset = *off; 4274 4275 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 4276 switch (errno) { 4277 case EPERM: 4278 return (zfs_error_fmt(hdl, EZFS_PERM, 4279 dgettext(TEXT_DOMAIN, 4280 "cannot show history for pool '%s'"), 4281 zhp->zpool_name)); 4282 case ENOENT: 4283 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 4284 dgettext(TEXT_DOMAIN, "cannot get history for pool " 4285 "'%s'"), zhp->zpool_name)); 4286 case ENOTSUP: 4287 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 4288 dgettext(TEXT_DOMAIN, "cannot get history for pool " 4289 "'%s', pool must be upgraded"), zhp->zpool_name)); 4290 default: 4291 return (zpool_standard_error_fmt(hdl, errno, 4292 dgettext(TEXT_DOMAIN, 4293 "cannot get history for '%s'"), zhp->zpool_name)); 4294 } 4295 } 4296 4297 *len = zc.zc_history_len; 4298 *off = zc.zc_history_offset; 4299 4300 return (0); 4301 } 4302 4303 /* 4304 * Retrieve the command history of a pool. 4305 */ 4306 int 4307 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off, 4308 boolean_t *eof) 4309 { 4310 char *buf; 4311 int buflen = 128 * 1024; 4312 nvlist_t **records = NULL; 4313 uint_t numrecords = 0; 4314 int err = 0, i; 4315 uint64_t start = *off; 4316 4317 buf = malloc(buflen); 4318 if (buf == NULL) 4319 return (ENOMEM); 4320 /* process about 1MB a time */ 4321 while (*off - start < 1024 * 1024) { 4322 uint64_t bytes_read = buflen; 4323 uint64_t leftover; 4324 4325 if ((err = get_history(zhp, buf, off, &bytes_read)) != 0) 4326 break; 4327 4328 /* if nothing else was read in, we're at EOF, just return */ 4329 if (!bytes_read) { 4330 *eof = B_TRUE; 4331 break; 4332 } 4333 4334 if ((err = zpool_history_unpack(buf, bytes_read, 4335 &leftover, &records, &numrecords)) != 0) 4336 break; 4337 *off -= leftover; 4338 if (leftover == bytes_read) { 4339 /* 4340 * no progress made, because buffer is not big enough 4341 * to hold this record; resize and retry. 
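 * The buffer starts at 128K and doubles on each retry, so even an
 * unusually large record is eventually accommodated.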
4342 */ 4343 buflen *= 2; 4344 free(buf); 4345 buf = malloc(buflen); 4346 if (buf == NULL) 4347 return (ENOMEM); 4348 } 4349 } 4350 4351 free(buf); 4352 4353 if (!err) { 4354 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 4355 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 4356 records, numrecords) == 0); 4357 } 4358 for (i = 0; i < numrecords; i++) 4359 nvlist_free(records[i]); 4360 free(records); 4361 4362 return (err); 4363 } 4364 4365 void 4366 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 4367 char *pathname, size_t len) 4368 { 4369 zfs_cmd_t zc = { 0 }; 4370 boolean_t mounted = B_FALSE; 4371 char *mntpnt = NULL; 4372 char dsname[ZFS_MAX_DATASET_NAME_LEN]; 4373 4374 if (dsobj == 0) { 4375 /* special case for the MOS */ 4376 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 4377 return; 4378 } 4379 4380 /* get the dataset's name */ 4381 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4382 zc.zc_obj = dsobj; 4383 if (ioctl(zhp->zpool_hdl->libzfs_fd, 4384 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 4385 /* just write out a path of two object numbers */ 4386 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 4387 dsobj, obj); 4388 return; 4389 } 4390 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 4391 4392 /* find out if the dataset is mounted */ 4393 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 4394 4395 /* get the corrupted object's path */ 4396 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 4397 zc.zc_obj = obj; 4398 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 4399 &zc) == 0) { 4400 if (mounted) { 4401 (void) snprintf(pathname, len, "%s%s", mntpnt, 4402 zc.zc_value); 4403 } else { 4404 (void) snprintf(pathname, len, "%s:%s", 4405 dsname, zc.zc_value); 4406 } 4407 } else { 4408 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 4409 } 4410 free(mntpnt); 4411 } 4412 4413 int 4414 zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap) 4415 { 4416 int error = lzc_set_bootenv(zhp->zpool_name, envmap); 4417 if (error != 0) { 4418 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error, 4419 dgettext(TEXT_DOMAIN, 4420 "error setting bootenv in pool '%s'"), zhp->zpool_name); 4421 } 4422 4423 return (error); 4424 } 4425 4426 int 4427 zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp) 4428 { 4429 nvlist_t *nvl; 4430 int error; 4431 4432 nvl = NULL; 4433 error = lzc_get_bootenv(zhp->zpool_name, &nvl); 4434 if (error != 0) { 4435 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error, 4436 dgettext(TEXT_DOMAIN, 4437 "error getting bootenv in pool '%s'"), zhp->zpool_name); 4438 } else { 4439 *nvlp = nvl; 4440 } 4441 4442 return (error); 4443 } 4444 4445 /* 4446 * Read the EFI label from the config, if a label does not exist then 4447 * pass back the error to the caller. If the caller has passed a non-NULL 4448 * diskaddr argument then we set it to the starting address of the EFI 4449 * partition. If the caller has passed a non-NULL boolean argument, then 4450 * we set it to indicate if the disk does have efi system partition. 
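 * Returns -1 if the vdev path is missing or the device cannot be opened;
 * otherwise returns the result of efi_alloc_and_read() (non-negative on
 * success).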
4451 */ 4452 static int 4453 read_efi_label(nvlist_t *config, diskaddr_t *sb, boolean_t *system) 4454 { 4455 char *path; 4456 int fd; 4457 char diskname[MAXPATHLEN]; 4458 boolean_t boot = B_FALSE; 4459 int err = -1; 4460 int slice; 4461 4462 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 4463 return (err); 4464 4465 (void) snprintf(diskname, sizeof (diskname), "%s%s", ZFS_RDISK_ROOT, 4466 strrchr(path, '/')); 4467 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 4468 struct dk_gpt *vtoc; 4469 4470 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 4471 for (slice = 0; slice < vtoc->efi_nparts; slice++) { 4472 if (vtoc->efi_parts[slice].p_tag == V_SYSTEM) 4473 boot = B_TRUE; 4474 if (vtoc->efi_parts[slice].p_tag == V_USR) 4475 break; 4476 } 4477 if (sb != NULL && vtoc->efi_parts[slice].p_tag == V_USR) 4478 *sb = vtoc->efi_parts[slice].p_start; 4479 if (system != NULL) 4480 *system = boot; 4481 efi_free(vtoc); 4482 } 4483 (void) close(fd); 4484 } 4485 return (err); 4486 } 4487 4488 /* 4489 * determine where a partition starts on a disk in the current 4490 * configuration 4491 */ 4492 static diskaddr_t 4493 find_start_block(nvlist_t *config) 4494 { 4495 nvlist_t **child; 4496 uint_t c, children; 4497 diskaddr_t sb = MAXOFFSET_T; 4498 uint64_t wholedisk; 4499 4500 if (nvlist_lookup_nvlist_array(config, 4501 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 4502 if (nvlist_lookup_uint64(config, 4503 ZPOOL_CONFIG_WHOLE_DISK, 4504 &wholedisk) != 0 || !wholedisk) { 4505 return (MAXOFFSET_T); 4506 } 4507 if (read_efi_label(config, &sb, NULL) < 0) 4508 sb = MAXOFFSET_T; 4509 return (sb); 4510 } 4511 4512 for (c = 0; c < children; c++) { 4513 sb = find_start_block(child[c]); 4514 if (sb != MAXOFFSET_T) { 4515 return (sb); 4516 } 4517 } 4518 return (MAXOFFSET_T); 4519 } 4520 4521 /* 4522 * Label an individual disk. The name provided is the short name, 4523 * stripped of any leading /dev path. 4524 */ 4525 int 4526 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name, 4527 zpool_boot_label_t boot_type, uint64_t boot_size, int *slice) 4528 { 4529 char path[MAXPATHLEN]; 4530 struct dk_gpt *vtoc; 4531 int fd; 4532 size_t resv; 4533 uint64_t slice_size; 4534 diskaddr_t start_block; 4535 char errbuf[1024]; 4536 4537 /* prepare an error message just in case */ 4538 (void) snprintf(errbuf, sizeof (errbuf), 4539 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 4540 4541 if (zhp) { 4542 nvlist_t *nvroot; 4543 4544 verify(nvlist_lookup_nvlist(zhp->zpool_config, 4545 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 4546 4547 if (zhp->zpool_start_block == 0) 4548 start_block = find_start_block(nvroot); 4549 else 4550 start_block = zhp->zpool_start_block; 4551 zhp->zpool_start_block = start_block; 4552 } else { 4553 /* new pool */ 4554 start_block = NEW_START_BLOCK; 4555 } 4556 4557 (void) snprintf(path, sizeof (path), "%s/%s%s", ZFS_RDISK_ROOT, name, 4558 BACKUP_SLICE); 4559 4560 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 4561 /* 4562 * This shouldn't happen. We've long since verified that this 4563 * is a valid device. 
4564 */ 4565 zfs_error_aux(hdl, 4566 dgettext(TEXT_DOMAIN, "unable to open device")); 4567 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 4568 } 4569 4570 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 4571 /* 4572 * The only way this can fail is if we run out of memory, or we 4573 * were unable to read the disk's capacity. 4574 */ 4575 if (errno == ENOMEM) 4576 (void) no_memory(hdl); 4577 4578 (void) close(fd); 4579 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4580 "unable to read disk capacity"), name); 4581 4582 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 4583 } 4584 resv = efi_reserved_sectors(vtoc); 4585 4586 /* 4587 * Why we use V_USR: V_BACKUP confuses users, and is considered 4588 * disposable by some EFI utilities (since EFI doesn't have a backup 4589 * slice). V_UNASSIGNED is supposed to be used only for zero size 4590 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 4591 * etc. were all pretty specific. V_USR is as close to reality as we 4592 * can get, in the absence of V_OTHER. 4593 */ 4594 /* first fix the partition start block */ 4595 if (start_block == MAXOFFSET_T) 4596 start_block = NEW_START_BLOCK; 4597 4598 /* 4599 * The EFI System partition uses slice 0. 4600 * ZFS is on slice 1 and slice 8 is reserved. 4601 * We assume that a GPT partition table without a system 4602 * partition has the zfs p_start == NEW_START_BLOCK. 4603 * If start_block != NEW_START_BLOCK, it means we already have a 4604 * system partition. The correct solution would be to query/cache 4605 * the vtoc from an existing vdev member. 4606 */ 4607 if (boot_type == ZPOOL_CREATE_BOOT_LABEL) { 4608 if (boot_size % vtoc->efi_lbasize != 0) { 4609 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4610 "boot partition size must be a multiple of %d"), 4611 vtoc->efi_lbasize); 4612 (void) close(fd); 4613 efi_free(vtoc); 4614 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 4615 } 4616 /* 4617 * System partition size checks. 4618 * Note that 1MB is a fairly arbitrary minimum; since we are 4619 * creating a dedicated pool, it should be enough to hold the 4620 * FAT file system plus the EFI boot loader. It may need to be 4621 * adjusted if the boot loader grows. 4622 */ 4623 if (boot_size < 1024 * 1024) { 4624 char buf[64]; 4625 zfs_nicenum(boot_size, buf, sizeof (buf)); 4626 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4627 "Specified size %s for EFI System partition is too " 4628 "small; the minimum size is 1MB."), buf); 4629 (void) close(fd); 4630 efi_free(vtoc); 4631 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 4632 } 4633 /* 33MB is tested with mkfs -F pcfs */ 4634 if (hdl->libzfs_printerr && 4635 ((vtoc->efi_lbasize == 512 && 4636 boot_size < 33 * 1024 * 1024) || 4637 (vtoc->efi_lbasize == 4096 && 4638 boot_size < 256 * 1024 * 1024))) { 4639 char buf[64]; 4640 zfs_nicenum(boot_size, buf, sizeof (buf)); 4641 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, 4642 "Warning: EFI System partition size %s is " 4643 "too small to create a FAT32 file\nsystem, which " 4644 "may result in an unbootable system.\n"), buf); 4645 } 4646 /* Adjust the zfs partition start by the size of the system partition. */ 4647 start_block += boot_size / vtoc->efi_lbasize; 4648 } 4649 4650 if (start_block == NEW_START_BLOCK) { 4651 /* 4652 * Use the default layout: 4653 * ZFS is on slice 0 and slice 8 is reserved.
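 * Schematically, the label written below is:
 *
 *     slice 0 (V_USR, "zfs"):     from start_block up to the last usable
 *                                 LBA minus the 'resv' reserved sectors
 *     slice 8 (V_RESERVED):       the final 'resv' sectors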
4654 */ 4655 slice_size = vtoc->efi_last_u_lba + 1; 4656 slice_size -= resv; 4657 slice_size -= start_block; 4658 if (slice != NULL) 4659 *slice = 0; 4660 4661 vtoc->efi_parts[0].p_start = start_block; 4662 vtoc->efi_parts[0].p_size = slice_size; 4663 4664 vtoc->efi_parts[0].p_tag = V_USR; 4665 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 4666 4667 vtoc->efi_parts[8].p_start = slice_size + start_block; 4668 vtoc->efi_parts[8].p_size = resv; 4669 vtoc->efi_parts[8].p_tag = V_RESERVED; 4670 } else { 4671 slice_size = start_block - NEW_START_BLOCK; 4672 vtoc->efi_parts[0].p_start = NEW_START_BLOCK; 4673 vtoc->efi_parts[0].p_size = slice_size; 4674 vtoc->efi_parts[0].p_tag = V_SYSTEM; 4675 (void) strcpy(vtoc->efi_parts[0].p_name, "loader"); 4676 if (slice != NULL) 4677 *slice = 1; 4678 /* prepare slice 1 */ 4679 slice_size = vtoc->efi_last_u_lba + 1 - slice_size; 4680 slice_size -= resv; 4681 slice_size -= NEW_START_BLOCK; 4682 vtoc->efi_parts[1].p_start = start_block; 4683 vtoc->efi_parts[1].p_size = slice_size; 4684 vtoc->efi_parts[1].p_tag = V_USR; 4685 (void) strcpy(vtoc->efi_parts[1].p_name, "zfs"); 4686 4687 vtoc->efi_parts[8].p_start = slice_size + start_block; 4688 vtoc->efi_parts[8].p_size = resv; 4689 vtoc->efi_parts[8].p_tag = V_RESERVED; 4690 } 4691 4692 if (efi_write(fd, vtoc) != 0) { 4693 /* 4694 * Some block drivers (like pcata) may not support EFI 4695 * GPT labels. Print out a helpful error message 4696 * directing the user to manually label the disk and 4697 * provide a specific slice. 4698 */ 4699 (void) close(fd); 4700 efi_free(vtoc); 4701 4702 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4703 "try using fdisk(1M) and then provide a specific slice")); 4704 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 4705 } 4706 4707 (void) close(fd); 4708 efi_free(vtoc); 4709 return (0); 4710 } 4711 4712 static boolean_t 4713 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 4714 { 4715 char *type; 4716 nvlist_t **child; 4717 uint_t children, c; 4718 4719 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 4720 if (strcmp(type, VDEV_TYPE_FILE) == 0 || 4721 strcmp(type, VDEV_TYPE_HOLE) == 0 || 4722 strcmp(type, VDEV_TYPE_MISSING) == 0) { 4723 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4724 "vdev type '%s' is not supported"), type); 4725 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 4726 return (B_FALSE); 4727 } 4728 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 4729 &child, &children) == 0) { 4730 for (c = 0; c < children; c++) { 4731 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 4732 return (B_FALSE); 4733 } 4734 } 4735 return (B_TRUE); 4736 } 4737 4738 /* 4739 * Check if this zvol is allowable for use as a dump device; returns zero if 4740 * it is, > 0 if it isn't, and < 0 if it isn't a zvol. 4741 * 4742 * Allowable storage configurations include mirrors, all raidz variants, and 4743 * pools with log, cache, and spare devices. Pools which are backed by files or 4744 * have missing/hole vdevs are not suitable.
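 *
 * 'arg' is expected to be a full zvol device path: ZVOL_FULL_DEV_DIR
 * followed by a <pool>/<volume> name (for example, a dump zvol named
 * rpool/dump).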
4745 */ 4746 int 4747 zvol_check_dump_config(char *arg) 4748 { 4749 zpool_handle_t *zhp = NULL; 4750 nvlist_t *config, *nvroot; 4751 char *p, *volname; 4752 nvlist_t **top; 4753 uint_t toplevels; 4754 libzfs_handle_t *hdl; 4755 char errbuf[1024]; 4756 char poolname[ZFS_MAX_DATASET_NAME_LEN]; 4757 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4758 int ret = 1; 4759 4760 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4761 return (-1); 4762 } 4763 4764 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4765 "dump is not supported on device '%s'"), arg); 4766 4767 if ((hdl = libzfs_init()) == NULL) 4768 return (1); 4769 libzfs_print_on_error(hdl, B_TRUE); 4770 4771 volname = arg + pathlen; 4772 4773 /* check the configuration of the pool */ 4774 if ((p = strchr(volname, '/')) == NULL) { 4775 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4776 "malformed dataset name")); 4777 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4778 return (1); 4779 } else if (p - volname >= ZFS_MAX_DATASET_NAME_LEN) { 4780 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4781 "dataset name is too long")); 4782 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4783 return (1); 4784 } else { 4785 (void) strncpy(poolname, volname, p - volname); 4786 poolname[p - volname] = '\0'; 4787 } 4788 4789 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4790 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4791 "could not open pool '%s'"), poolname); 4792 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4793 goto out; 4794 } 4795 config = zpool_get_config(zhp, NULL); 4796 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4797 &nvroot) != 0) { 4798 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4799 "could not obtain vdev configuration for '%s'"), poolname); 4800 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4801 goto out; 4802 } 4803 4804 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4805 &top, &toplevels) == 0); 4806 4807 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4808 goto out; 4809 } 4810 ret = 0; 4811 4812 out: 4813 if (zhp) 4814 zpool_close(zhp); 4815 libzfs_fini(hdl); 4816 return (ret); 4817 } 4818