1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 25 * Copyright (c) 2011, 2020 by Delphix. All rights reserved. 26 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com> 27 * Copyright (c) 2018 Datto Inc. 28 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved. 29 * Copyright (c) 2017, Intel Corporation. 
30 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com> 31 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org> 32 */ 33 34 #include <errno.h> 35 #include <libintl.h> 36 #include <stdio.h> 37 #include <stdlib.h> 38 #include <strings.h> 39 #include <unistd.h> 40 #include <libgen.h> 41 #include <zone.h> 42 #include <sys/stat.h> 43 #include <sys/efi_partition.h> 44 #include <sys/systeminfo.h> 45 #include <sys/zfs_ioctl.h> 46 #include <sys/zfs_sysfs.h> 47 #include <sys/vdev_disk.h> 48 #include <sys/types.h> 49 #include <dlfcn.h> 50 #include <libzutil.h> 51 #include <fcntl.h> 52 #include <unistd.h> 53 54 #include "zfs_namecheck.h" 55 #include "zfs_prop.h" 56 #include "libzfs_impl.h" 57 #include "zfs_comutil.h" 58 #include "zfeature_common.h" 59 60 static boolean_t zpool_vdev_is_interior(const char *name); 61 62 typedef struct prop_flags { 63 int create:1; /* Validate property on creation */ 64 int import:1; /* Validate property on import */ 65 } prop_flags_t; 66 67 /* 68 * ==================================================================== 69 * zpool property functions 70 * ==================================================================== 71 */ 72 73 static int 74 zpool_get_all_props(zpool_handle_t *zhp) 75 { 76 zfs_cmd_t zc = {"\0"}; 77 libzfs_handle_t *hdl = zhp->zpool_hdl; 78 79 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 80 81 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 82 return (-1); 83 84 while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 85 if (errno == ENOMEM) { 86 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 87 zcmd_free_nvlists(&zc); 88 return (-1); 89 } 90 } else { 91 zcmd_free_nvlists(&zc); 92 return (-1); 93 } 94 } 95 96 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 97 zcmd_free_nvlists(&zc); 98 return (-1); 99 } 100 101 zcmd_free_nvlists(&zc); 102 103 return (0); 104 } 105 106 int 107 zpool_props_refresh(zpool_handle_t *zhp) 108 { 109 nvlist_t *old_props; 110 111 old_props = zhp->zpool_props; 112 113 
if (zpool_get_all_props(zhp) != 0) 114 return (-1); 115 116 nvlist_free(old_props); 117 return (0); 118 } 119 120 static const char * 121 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 122 zprop_source_t *src) 123 { 124 nvlist_t *nv, *nvl; 125 uint64_t ival; 126 char *value; 127 zprop_source_t source; 128 129 nvl = zhp->zpool_props; 130 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 131 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 132 source = ival; 133 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 134 } else { 135 source = ZPROP_SRC_DEFAULT; 136 if ((value = (char *)zpool_prop_default_string(prop)) == NULL) 137 value = "-"; 138 } 139 140 if (src) 141 *src = source; 142 143 return (value); 144 } 145 146 uint64_t 147 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 148 { 149 nvlist_t *nv, *nvl; 150 uint64_t value; 151 zprop_source_t source; 152 153 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 154 /* 155 * zpool_get_all_props() has most likely failed because 156 * the pool is faulted, but if all we need is the top level 157 * vdev's guid then get it from the zhp config nvlist. 158 */ 159 if ((prop == ZPOOL_PROP_GUID) && 160 (nvlist_lookup_nvlist(zhp->zpool_config, 161 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 162 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 163 == 0)) { 164 return (value); 165 } 166 return (zpool_prop_default_numeric(prop)); 167 } 168 169 nvl = zhp->zpool_props; 170 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 171 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 172 source = value; 173 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 174 } else { 175 source = ZPROP_SRC_DEFAULT; 176 value = zpool_prop_default_numeric(prop); 177 } 178 179 if (src) 180 *src = source; 181 182 return (value); 183 } 184 185 /* 186 * Map VDEV STATE to printed strings. 
187 */ 188 const char * 189 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 190 { 191 switch (state) { 192 case VDEV_STATE_CLOSED: 193 case VDEV_STATE_OFFLINE: 194 return (gettext("OFFLINE")); 195 case VDEV_STATE_REMOVED: 196 return (gettext("REMOVED")); 197 case VDEV_STATE_CANT_OPEN: 198 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 199 return (gettext("FAULTED")); 200 else if (aux == VDEV_AUX_SPLIT_POOL) 201 return (gettext("SPLIT")); 202 else 203 return (gettext("UNAVAIL")); 204 case VDEV_STATE_FAULTED: 205 return (gettext("FAULTED")); 206 case VDEV_STATE_DEGRADED: 207 return (gettext("DEGRADED")); 208 case VDEV_STATE_HEALTHY: 209 return (gettext("ONLINE")); 210 211 default: 212 break; 213 } 214 215 return (gettext("UNKNOWN")); 216 } 217 218 /* 219 * Map POOL STATE to printed strings. 220 */ 221 const char * 222 zpool_pool_state_to_name(pool_state_t state) 223 { 224 switch (state) { 225 default: 226 break; 227 case POOL_STATE_ACTIVE: 228 return (gettext("ACTIVE")); 229 case POOL_STATE_EXPORTED: 230 return (gettext("EXPORTED")); 231 case POOL_STATE_DESTROYED: 232 return (gettext("DESTROYED")); 233 case POOL_STATE_SPARE: 234 return (gettext("SPARE")); 235 case POOL_STATE_L2CACHE: 236 return (gettext("L2CACHE")); 237 case POOL_STATE_UNINITIALIZED: 238 return (gettext("UNINITIALIZED")); 239 case POOL_STATE_UNAVAIL: 240 return (gettext("UNAVAIL")); 241 case POOL_STATE_POTENTIALLY_ACTIVE: 242 return (gettext("POTENTIALLY_ACTIVE")); 243 } 244 245 return (gettext("UNKNOWN")); 246 } 247 248 /* 249 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED", 250 * "SUSPENDED", etc). 
 */
const char *
zpool_get_state_str(zpool_handle_t *zhp)
{
	zpool_errata_t errata;
	zpool_status_t status;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	const char *str;

	status = zpool_get_status(zhp, NULL, &errata);

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		str = gettext("FAULTED");
	} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
	    status == ZPOOL_STATUS_IO_FAILURE_MMP) {
		str = gettext("SUSPENDED");
	} else {
		/* Otherwise report the state of the top-level vdev. */
		verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		verify(nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
		    == 0);
		str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
	}
	return (str);
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;

	/*
	 * For an unavailable pool only a handful of properties can be
	 * answered (from the handle itself or the cached config);
	 * everything else reports "-".
	 */
	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
		case ZPOOL_PROP_COMPATIBILITY:
			/* These may still be readable from the cached props. */
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			/* Raw value when literal, human-readable otherwise. */
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_CHECKPOINT:
			/* Zero means "none"; print as "-". */
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			/* UINT64_MAX means fragmentation is unavailable. */
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			/* Stored scaled by 100; e.g. 150 prints as 1.50x. */
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;
		case ZPOOL_PROP_VERSION:
			/* Feature-flag pools have no meaningful version. */
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

/*
 * Check if the bootfs name has the same pool name as it is set to.
 * Assuming bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, const char *bootfs)
{
	int len = strlen(pool);
	if (bootfs[0] == '\0')
		return (B_TRUE);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	/* Accept the pool itself or any dataset below it ("pool/..."). */
	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
458 */ 459 static nvlist_t * 460 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 461 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 462 { 463 nvpair_t *elem; 464 nvlist_t *retprops; 465 zpool_prop_t prop; 466 char *strval; 467 uint64_t intval; 468 char *slash, *check; 469 struct stat64 statbuf; 470 zpool_handle_t *zhp; 471 char badword[ZFS_MAXPROPLEN]; 472 char badfile[MAXPATHLEN]; 473 474 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 475 (void) no_memory(hdl); 476 return (NULL); 477 } 478 479 elem = NULL; 480 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 481 const char *propname = nvpair_name(elem); 482 483 prop = zpool_name_to_prop(propname); 484 if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) { 485 int err; 486 char *fname = strchr(propname, '@') + 1; 487 488 err = zfeature_lookup_name(fname, NULL); 489 if (err != 0) { 490 ASSERT3U(err, ==, ENOENT); 491 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 492 "feature '%s' unsupported by kernel"), 493 fname); 494 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 495 goto error; 496 } 497 498 if (nvpair_type(elem) != DATA_TYPE_STRING) { 499 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 500 "'%s' must be a string"), propname); 501 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 502 goto error; 503 } 504 505 (void) nvpair_value_string(elem, &strval); 506 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 && 507 strcmp(strval, ZFS_FEATURE_DISABLED) != 0) { 508 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 509 "property '%s' can only be set to " 510 "'enabled' or 'disabled'"), propname); 511 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 512 goto error; 513 } 514 515 if (!flags.create && 516 strcmp(strval, ZFS_FEATURE_DISABLED) == 0) { 517 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 518 "property '%s' can only be set to " 519 "'disabled' at creation time"), propname); 520 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 521 goto error; 522 } 523 524 if (nvlist_add_uint64(retprops, 
propname, 0) != 0) { 525 (void) no_memory(hdl); 526 goto error; 527 } 528 continue; 529 } 530 531 /* 532 * Make sure this property is valid and applies to this type. 533 */ 534 if (prop == ZPOOL_PROP_INVAL) { 535 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 536 "invalid property '%s'"), propname); 537 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 538 goto error; 539 } 540 541 if (zpool_prop_readonly(prop)) { 542 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 543 "is readonly"), propname); 544 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 545 goto error; 546 } 547 548 if (!flags.create && zpool_prop_setonce(prop)) { 549 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 550 "property '%s' can only be set at " 551 "creation time"), propname); 552 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 553 goto error; 554 } 555 556 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 557 &strval, &intval, errbuf) != 0) 558 goto error; 559 560 /* 561 * Perform additional checking for specific properties. 
562 */ 563 switch (prop) { 564 case ZPOOL_PROP_VERSION: 565 if (intval < version || 566 !SPA_VERSION_IS_SUPPORTED(intval)) { 567 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 568 "property '%s' number %d is invalid."), 569 propname, intval); 570 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 571 goto error; 572 } 573 break; 574 575 case ZPOOL_PROP_ASHIFT: 576 if (intval != 0 && 577 (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) { 578 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 579 "property '%s' number %d is invalid, only " 580 "values between %" PRId32 " and " 581 "%" PRId32 " are allowed."), 582 propname, intval, ASHIFT_MIN, ASHIFT_MAX); 583 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 584 goto error; 585 } 586 break; 587 588 case ZPOOL_PROP_BOOTFS: 589 if (flags.create || flags.import) { 590 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 591 "property '%s' cannot be set at creation " 592 "or import time"), propname); 593 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 594 goto error; 595 } 596 597 if (version < SPA_VERSION_BOOTFS) { 598 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 599 "pool must be upgraded to support " 600 "'%s' property"), propname); 601 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 602 goto error; 603 } 604 605 /* 606 * bootfs property value has to be a dataset name and 607 * the dataset has to be in the same pool as it sets to. 
608 */ 609 if (!bootfs_name_valid(poolname, strval)) { 610 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 611 "is an invalid name"), strval); 612 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 613 goto error; 614 } 615 616 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 617 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 618 "could not open pool '%s'"), poolname); 619 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 620 goto error; 621 } 622 zpool_close(zhp); 623 break; 624 625 case ZPOOL_PROP_ALTROOT: 626 if (!flags.create && !flags.import) { 627 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 628 "property '%s' can only be set during pool " 629 "creation or import"), propname); 630 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 631 goto error; 632 } 633 634 if (strval[0] != '/') { 635 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 636 "bad alternate root '%s'"), strval); 637 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 638 goto error; 639 } 640 break; 641 642 case ZPOOL_PROP_CACHEFILE: 643 if (strval[0] == '\0') 644 break; 645 646 if (strcmp(strval, "none") == 0) 647 break; 648 649 if (strval[0] != '/') { 650 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 651 "property '%s' must be empty, an " 652 "absolute path, or 'none'"), propname); 653 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 654 goto error; 655 } 656 657 slash = strrchr(strval, '/'); 658 659 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 660 strcmp(slash, "/..") == 0) { 661 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 662 "'%s' is not a valid file"), strval); 663 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 664 goto error; 665 } 666 667 *slash = '\0'; 668 669 if (strval[0] != '\0' && 670 (stat64(strval, &statbuf) != 0 || 671 !S_ISDIR(statbuf.st_mode))) { 672 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 673 "'%s' is not a valid directory"), 674 strval); 675 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 676 goto error; 677 } 678 679 *slash = '/'; 680 break; 681 682 case ZPOOL_PROP_COMPATIBILITY: 683 switch 
(zpool_load_compat(strval, NULL, 684 badword, badfile)) { 685 case ZPOOL_COMPATIBILITY_OK: 686 break; 687 case ZPOOL_COMPATIBILITY_READERR: 688 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 689 "error reading feature file '%s'"), 690 badfile); 691 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 692 goto error; 693 case ZPOOL_COMPATIBILITY_BADFILE: 694 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 695 "feature file '%s' too large or not " 696 "newline-terminated"), 697 badfile); 698 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 699 goto error; 700 case ZPOOL_COMPATIBILITY_BADWORD: 701 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 702 "unknown feature '%s' in feature " 703 "file '%s'"), 704 badword, badfile); 705 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 706 goto error; 707 case ZPOOL_COMPATIBILITY_NOFILES: 708 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 709 "no feature files specified")); 710 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 711 goto error; 712 } 713 break; 714 715 case ZPOOL_PROP_COMMENT: 716 for (check = strval; *check != '\0'; check++) { 717 if (!isprint(*check)) { 718 zfs_error_aux(hdl, 719 dgettext(TEXT_DOMAIN, 720 "comment may only have printable " 721 "characters")); 722 (void) zfs_error(hdl, EZFS_BADPROP, 723 errbuf); 724 goto error; 725 } 726 } 727 if (strlen(strval) > ZPROP_MAX_COMMENT) { 728 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 729 "comment must not exceed %d characters"), 730 ZPROP_MAX_COMMENT); 731 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 732 goto error; 733 } 734 break; 735 case ZPOOL_PROP_READONLY: 736 if (!flags.import) { 737 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 738 "property '%s' can only be set at " 739 "import time"), propname); 740 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 741 goto error; 742 } 743 break; 744 case ZPOOL_PROP_MULTIHOST: 745 if (get_system_hostid() == 0) { 746 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 747 "requires a non-zero system hostid")); 748 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 749 goto error; 750 } 751 break; 
752 case ZPOOL_PROP_DEDUPDITTO: 753 printf("Note: property '%s' no longer has " 754 "any effect\n", propname); 755 break; 756 757 default: 758 break; 759 } 760 } 761 762 return (retprops); 763 error: 764 nvlist_free(retprops); 765 return (NULL); 766 } 767 768 /* 769 * Set zpool property : propname=propval. 770 */ 771 int 772 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 773 { 774 zfs_cmd_t zc = {"\0"}; 775 int ret = -1; 776 char errbuf[1024]; 777 nvlist_t *nvl = NULL; 778 nvlist_t *realprops; 779 uint64_t version; 780 prop_flags_t flags = { 0 }; 781 782 (void) snprintf(errbuf, sizeof (errbuf), 783 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 784 zhp->zpool_name); 785 786 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 787 return (no_memory(zhp->zpool_hdl)); 788 789 if (nvlist_add_string(nvl, propname, propval) != 0) { 790 nvlist_free(nvl); 791 return (no_memory(zhp->zpool_hdl)); 792 } 793 794 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 795 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 796 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 797 nvlist_free(nvl); 798 return (-1); 799 } 800 801 nvlist_free(nvl); 802 nvl = realprops; 803 804 /* 805 * Execute the corresponding ioctl() to set this property. 
806 */ 807 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 808 809 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 810 nvlist_free(nvl); 811 return (-1); 812 } 813 814 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 815 816 zcmd_free_nvlists(&zc); 817 nvlist_free(nvl); 818 819 if (ret) 820 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 821 else 822 (void) zpool_props_refresh(zhp); 823 824 return (ret); 825 } 826 827 int 828 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp, 829 boolean_t literal) 830 { 831 libzfs_handle_t *hdl = zhp->zpool_hdl; 832 zprop_list_t *entry; 833 char buf[ZFS_MAXPROPLEN]; 834 nvlist_t *features = NULL; 835 nvpair_t *nvp; 836 zprop_list_t **last; 837 boolean_t firstexpand = (NULL == *plp); 838 int i; 839 840 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 841 return (-1); 842 843 last = plp; 844 while (*last != NULL) 845 last = &(*last)->pl_next; 846 847 if ((*plp)->pl_all) 848 features = zpool_get_features(zhp); 849 850 if ((*plp)->pl_all && firstexpand) { 851 for (i = 0; i < SPA_FEATURES; i++) { 852 zprop_list_t *entry = zfs_alloc(hdl, 853 sizeof (zprop_list_t)); 854 entry->pl_prop = ZPROP_INVAL; 855 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s", 856 spa_feature_table[i].fi_uname); 857 entry->pl_width = strlen(entry->pl_user_prop); 858 entry->pl_all = B_TRUE; 859 860 *last = entry; 861 last = &entry->pl_next; 862 } 863 } 864 865 /* add any unsupported features */ 866 for (nvp = nvlist_next_nvpair(features, NULL); 867 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) { 868 char *propname; 869 boolean_t found; 870 zprop_list_t *entry; 871 872 if (zfeature_is_supported(nvpair_name(nvp))) 873 continue; 874 875 propname = zfs_asprintf(hdl, "unsupported@%s", 876 nvpair_name(nvp)); 877 878 /* 879 * Before adding the property to the list make sure that no 880 * other pool already added the same property. 
881 */ 882 found = B_FALSE; 883 entry = *plp; 884 while (entry != NULL) { 885 if (entry->pl_user_prop != NULL && 886 strcmp(propname, entry->pl_user_prop) == 0) { 887 found = B_TRUE; 888 break; 889 } 890 entry = entry->pl_next; 891 } 892 if (found) { 893 free(propname); 894 continue; 895 } 896 897 entry = zfs_alloc(hdl, sizeof (zprop_list_t)); 898 entry->pl_prop = ZPROP_INVAL; 899 entry->pl_user_prop = propname; 900 entry->pl_width = strlen(entry->pl_user_prop); 901 entry->pl_all = B_TRUE; 902 903 *last = entry; 904 last = &entry->pl_next; 905 } 906 907 for (entry = *plp; entry != NULL; entry = entry->pl_next) { 908 if (entry->pl_fixed && !literal) 909 continue; 910 911 if (entry->pl_prop != ZPROP_INVAL && 912 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), 913 NULL, literal) == 0) { 914 if (strlen(buf) > entry->pl_width) 915 entry->pl_width = strlen(buf); 916 } 917 } 918 919 return (0); 920 } 921 922 /* 923 * Get the state for the given feature on the given ZFS pool. 924 */ 925 int 926 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf, 927 size_t len) 928 { 929 uint64_t refcount; 930 boolean_t found = B_FALSE; 931 nvlist_t *features = zpool_get_features(zhp); 932 boolean_t supported; 933 const char *feature = strchr(propname, '@') + 1; 934 935 supported = zpool_prop_feature(propname); 936 ASSERT(supported || zpool_prop_unsupported(propname)); 937 938 /* 939 * Convert from feature name to feature guid. This conversion is 940 * unnecessary for unsupported@... properties because they already 941 * use guids. 
942 */ 943 if (supported) { 944 int ret; 945 spa_feature_t fid; 946 947 ret = zfeature_lookup_name(feature, &fid); 948 if (ret != 0) { 949 (void) strlcpy(buf, "-", len); 950 return (ENOTSUP); 951 } 952 feature = spa_feature_table[fid].fi_guid; 953 } 954 955 if (nvlist_lookup_uint64(features, feature, &refcount) == 0) 956 found = B_TRUE; 957 958 if (supported) { 959 if (!found) { 960 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len); 961 } else { 962 if (refcount == 0) 963 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len); 964 else 965 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len); 966 } 967 } else { 968 if (found) { 969 if (refcount == 0) { 970 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE); 971 } else { 972 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY); 973 } 974 } else { 975 (void) strlcpy(buf, "-", len); 976 return (ENOTSUP); 977 } 978 } 979 980 return (0); 981 } 982 983 /* 984 * Validate the given pool name, optionally putting an extended error message in 985 * 'buf'. 986 */ 987 boolean_t 988 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 989 { 990 namecheck_err_t why; 991 char what; 992 int ret; 993 994 ret = pool_namecheck(pool, &why, &what); 995 996 /* 997 * The rules for reserved pool names were extended at a later point. 998 * But we need to support users with existing pools that may now be 999 * invalid. So we only check for this expanded set of names during a 1000 * create (or import), and only in userland. 
1001 */ 1002 if (ret == 0 && !isopen && 1003 (strncmp(pool, "mirror", 6) == 0 || 1004 strncmp(pool, "raidz", 5) == 0 || 1005 strncmp(pool, "draid", 5) == 0 || 1006 strncmp(pool, "spare", 5) == 0 || 1007 strcmp(pool, "log") == 0)) { 1008 if (hdl != NULL) 1009 zfs_error_aux(hdl, 1010 dgettext(TEXT_DOMAIN, "name is reserved")); 1011 return (B_FALSE); 1012 } 1013 1014 1015 if (ret != 0) { 1016 if (hdl != NULL) { 1017 switch (why) { 1018 case NAME_ERR_TOOLONG: 1019 zfs_error_aux(hdl, 1020 dgettext(TEXT_DOMAIN, "name is too long")); 1021 break; 1022 1023 case NAME_ERR_INVALCHAR: 1024 zfs_error_aux(hdl, 1025 dgettext(TEXT_DOMAIN, "invalid character " 1026 "'%c' in pool name"), what); 1027 break; 1028 1029 case NAME_ERR_NOLETTER: 1030 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1031 "name must begin with a letter")); 1032 break; 1033 1034 case NAME_ERR_RESERVED: 1035 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1036 "name is reserved")); 1037 break; 1038 1039 case NAME_ERR_DISKLIKE: 1040 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1041 "pool name is reserved")); 1042 break; 1043 1044 case NAME_ERR_LEADING_SLASH: 1045 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1046 "leading slash in name")); 1047 break; 1048 1049 case NAME_ERR_EMPTY_COMPONENT: 1050 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1051 "empty component in name")); 1052 break; 1053 1054 case NAME_ERR_TRAILING_SLASH: 1055 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1056 "trailing slash in name")); 1057 break; 1058 1059 case NAME_ERR_MULTIPLE_DELIMITERS: 1060 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1061 "multiple '@' and/or '#' delimiters in " 1062 "name")); 1063 break; 1064 1065 case NAME_ERR_NO_AT: 1066 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1067 "permission set is missing '@'")); 1068 break; 1069 1070 default: 1071 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1072 "(%d) not defined"), why); 1073 break; 1074 } 1075 } 1076 return (B_FALSE); 1077 } 1078 1079 return (B_TRUE); 1080 } 1081 1082 /* 1083 * Open a handle to the given pool, 
even if the pool is currently in the FAULTED 1084 * state. 1085 */ 1086 zpool_handle_t * 1087 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 1088 { 1089 zpool_handle_t *zhp; 1090 boolean_t missing; 1091 1092 /* 1093 * Make sure the pool name is valid. 1094 */ 1095 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 1096 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1097 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 1098 pool); 1099 return (NULL); 1100 } 1101 1102 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1103 return (NULL); 1104 1105 zhp->zpool_hdl = hdl; 1106 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1107 1108 if (zpool_refresh_stats(zhp, &missing) != 0) { 1109 zpool_close(zhp); 1110 return (NULL); 1111 } 1112 1113 if (missing) { 1114 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 1115 (void) zfs_error_fmt(hdl, EZFS_NOENT, 1116 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 1117 zpool_close(zhp); 1118 return (NULL); 1119 } 1120 1121 return (zhp); 1122 } 1123 1124 /* 1125 * Like the above, but silent on error. Used when iterating over pools (because 1126 * the configuration cache may be out of date). 1127 */ 1128 int 1129 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 1130 { 1131 zpool_handle_t *zhp; 1132 boolean_t missing; 1133 1134 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1135 return (-1); 1136 1137 zhp->zpool_hdl = hdl; 1138 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1139 1140 if (zpool_refresh_stats(zhp, &missing) != 0) { 1141 zpool_close(zhp); 1142 return (-1); 1143 } 1144 1145 if (missing) { 1146 zpool_close(zhp); 1147 *ret = NULL; 1148 return (0); 1149 } 1150 1151 *ret = zhp; 1152 return (0); 1153 } 1154 1155 /* 1156 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 1157 * state. 
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	/* Refuse to hand out handles for unusable pools. */
	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Check if vdev list contains a special vdev
 */
static boolean_t
zpool_has_special_vdev(nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (uint_t c = 0; c < children; c++) {
			char *bias;

			/* A "special" allocation bias marks a special vdev. */
			if (nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
			    strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
				return (B_TRUE);
			}
		}
	}
	return (B_FALSE);
}

/*
 * Check if vdev list contains a dRAID vdev
 */
static boolean_t
zpool_has_draid_vdev(nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (uint_t c = 0; c < children; c++) {
			char *type;

			if (nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_TYPE, &type) == 0 &&
			    strcmp(type, VDEV_TYPE_DRAID) == 0) {
				return (B_TRUE);
			}
		}
	}
	return (B_FALSE);
}

/*
 * Output a dRAID top-level vdev name in to the provided buffer.
 */
static char *
zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
    uint64_t spares, uint64_t children)
{
	/* Format: draid<parity>:<data>d:<children>c:<spares>s */
	snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
	    VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
	    (u_longlong_t)children, (u_longlong_t)spares);

	return (name);
}

/*
 * Return B_TRUE if the provided name is a dRAID spare name.
 */
boolean_t
zpool_is_draid_spare(const char *name)
{
	uint64_t spare_id, parity, vdev_id;

	/* Spare names look like draid<parity>-<vdev>-<spare>. */
	if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
	    (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
	    (u_longlong_t *)&spare_id) == 3) {
		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;	/* validated root-dataset props */
	nvlist_t *zc_props = NULL;	/* validated pool props (+ rootfs) */
	nvlist_t *hidden_args = NULL;	/* wrapping key material, if any */
	uint8_t *wkeydata = NULL;
	uint_t wkeylen = 0;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		/* "zoned=on" affects which dataset props are legal. */
		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}

		/* special_small_blocks is meaningless without a special vdev */
		if (nvlist_exists(zc_fsprops,
		    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
		    !zpool_has_special_vdev(nvroot)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "%s property requires a special vdev"),
			    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
			(void) zfs_error(hdl, EZFS_BADPROP, msg);
			goto create_failed;
		}

		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
		    &wkeydata, &wkeylen) != 0) {
			zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
		if (wkeydata != NULL) {
			if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
				goto create_failed;

			if (nvlist_add_uint8_array(hidden_args, "wkeydata",
			    wkeydata, wkeylen) != 0)
				goto create_failed;

			if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
			    hidden_args) != 0)
				goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		/*
		 * NOTE(review): errno from the failed ioctl is consumed by the
		 * switch below only AFTER these free calls run — presumably
		 * none of them clobber errno, but that is worth confirming.
		 */
		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);
		nvlist_free(hidden_args);
		if (wkeydata != NULL)
			free(wkeydata);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.  This can also happen under if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EINVAL:
			/* Distinguish "old kernel without dRAID" from a
			 * genuinely invalid configuration. */
			if (zpool_has_draid_vdev(nvroot) &&
			    zfeature_lookup_name("draid", NULL) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID vdevs are unsupported by the "
				    "kernel"));
				return (zfs_error(hdl, EZFS_BADDEV, msg));
			} else {
				return (zpool_standard_error(hdl, errno, msg));
			}

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	/* Shared cleanup for both the success path and validation failures;
	 * ret is 0 here only when the ioctl above succeeded. */
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	nvlist_free(hidden_args);
	if (wkeydata != NULL)
		free(wkeydata);
	return (ret);
}

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
1470 */ 1471 int 1472 zpool_destroy(zpool_handle_t *zhp, const char *log_str) 1473 { 1474 zfs_cmd_t zc = {"\0"}; 1475 zfs_handle_t *zfp = NULL; 1476 libzfs_handle_t *hdl = zhp->zpool_hdl; 1477 char msg[1024]; 1478 1479 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1480 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1481 return (-1); 1482 1483 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1484 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1485 1486 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1487 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1488 "cannot destroy '%s'"), zhp->zpool_name); 1489 1490 if (errno == EROFS) { 1491 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1492 "one or more devices is read only")); 1493 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1494 } else { 1495 (void) zpool_standard_error(hdl, errno, msg); 1496 } 1497 1498 if (zfp) 1499 zfs_close(zfp); 1500 return (-1); 1501 } 1502 1503 if (zfp) { 1504 remove_mountpoint(zfp); 1505 zfs_close(zfp); 1506 } 1507 1508 return (0); 1509 } 1510 1511 /* 1512 * Create a checkpoint in the given pool. 1513 */ 1514 int 1515 zpool_checkpoint(zpool_handle_t *zhp) 1516 { 1517 libzfs_handle_t *hdl = zhp->zpool_hdl; 1518 char msg[1024]; 1519 int error; 1520 1521 error = lzc_pool_checkpoint(zhp->zpool_name); 1522 if (error != 0) { 1523 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1524 "cannot checkpoint '%s'"), zhp->zpool_name); 1525 (void) zpool_standard_error(hdl, error, msg); 1526 return (-1); 1527 } 1528 1529 return (0); 1530 } 1531 1532 /* 1533 * Discard the checkpoint from the given pool. 
 */
int
zpool_discard_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	int error;

	/* lzc returns a positive errno-style code, not -1/errno. */
	error = lzc_pool_checkpoint_discard(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot discard checkpoint in '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
		return (-1);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	/* Spares require at least SPA_VERSION_SPARES on-disk version. */
	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	/* Likewise, cache devices require SPA_VERSION_L2CACHE. */
	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EINVAL:

			/*
			 * Distinguish a kernel without dRAID support from a
			 * config rejected because of in-progress removal.
			 */
			if (zpool_has_draid_vdev(nvroot) &&
			    zfeature_lookup_name("draid", NULL) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID vdevs are unsupported by the "
				    "kernel"));
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid config; a pool with removing/"
				    "removed vdevs does not support adding "
				    "raidz or dRAID vdevs"));
			}

			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	/* force/hardforce are smuggled through zc_cookie/zc_guid. */
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			/* A shared spare is still in use by another pool. */
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

/* Export without hard-force; see zpool_export_common(). */
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

/* Forcibly export (force + hardforce); see zpool_export_common(). */
int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

/*
 * Print a human-readable description of what a (possibly dry-run) rewind
 * did or would do, based on the rewind info nvlist in 'config'.
 */
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	/*
	 * NOTE(review): the cast of &rewindto (uint64_t *) to (time_t *)
	 * assumes time_t is 64-bit; on a 32-bit time_t platform this reads
	 * only part of the value — confirm the supported platforms.
	 */
	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			/* Round to the nearest minute for readability. */
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

/*
 * Explain to the user how a failed pool load might be recovered (rewind),
 * including estimated data loss, or tell them to restore from backup when
 * no rewind information is available.
 */
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem.  "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly.  "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly.  "),
		    (longlong_t)loss);
	}
	/* edata == UINT64_MAX means the error count was not reported. */
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain.  "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain.  "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new properties value to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	/* An altroot import also disables the cachefile ("none"). */
	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}

/*
 * Recursively print an indented vdev tree rooted at 'nv'; 'name' is the
 * caller-supplied label for this level (NULL suppresses the line).
 */
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	/* Leaf vdev: nothing further to print. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

/*
 * Print the list of features in 'config' that this system does not support,
 * one per line, with the feature description when one is provided.
 */
void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameters control whether the pool
 * is imported with a different name.
1940 */ 1941 int 1942 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1943 nvlist_t *props, int flags) 1944 { 1945 zfs_cmd_t zc = {"\0"}; 1946 zpool_load_policy_t policy; 1947 nvlist_t *nv = NULL; 1948 nvlist_t *nvinfo = NULL; 1949 nvlist_t *missing = NULL; 1950 char *thename; 1951 char *origname; 1952 int ret; 1953 int error = 0; 1954 char errbuf[1024]; 1955 1956 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1957 &origname) == 0); 1958 1959 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1960 "cannot import pool '%s'"), origname); 1961 1962 if (newname != NULL) { 1963 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1964 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1965 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1966 newname)); 1967 thename = (char *)newname; 1968 } else { 1969 thename = origname; 1970 } 1971 1972 if (props != NULL) { 1973 uint64_t version; 1974 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1975 1976 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1977 &version) == 0); 1978 1979 if ((props = zpool_valid_proplist(hdl, origname, 1980 props, version, flags, errbuf)) == NULL) 1981 return (-1); 1982 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1983 nvlist_free(props); 1984 return (-1); 1985 } 1986 nvlist_free(props); 1987 } 1988 1989 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1990 1991 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1992 &zc.zc_guid) == 0); 1993 1994 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1995 zcmd_free_nvlists(&zc); 1996 return (-1); 1997 } 1998 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1999 zcmd_free_nvlists(&zc); 2000 return (-1); 2001 } 2002 2003 zc.zc_cookie = flags; 2004 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 2005 errno == ENOMEM) { 2006 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 2007 zcmd_free_nvlists(&zc); 2008 return (-1); 2009 } 2010 } 
2011 if (ret != 0) 2012 error = errno; 2013 2014 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 2015 2016 zcmd_free_nvlists(&zc); 2017 2018 zpool_get_load_policy(config, &policy); 2019 2020 if (error) { 2021 char desc[1024]; 2022 char aux[256]; 2023 2024 /* 2025 * Dry-run failed, but we print out what success 2026 * looks like if we found a best txg 2027 */ 2028 if (policy.zlp_rewind & ZPOOL_TRY_REWIND) { 2029 zpool_rewind_exclaim(hdl, newname ? origname : thename, 2030 B_TRUE, nv); 2031 nvlist_free(nv); 2032 return (-1); 2033 } 2034 2035 if (newname == NULL) 2036 (void) snprintf(desc, sizeof (desc), 2037 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 2038 thename); 2039 else 2040 (void) snprintf(desc, sizeof (desc), 2041 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 2042 origname, thename); 2043 2044 switch (error) { 2045 case ENOTSUP: 2046 if (nv != NULL && nvlist_lookup_nvlist(nv, 2047 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 2048 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) { 2049 (void) printf(dgettext(TEXT_DOMAIN, "This " 2050 "pool uses the following feature(s) not " 2051 "supported by this system:\n")); 2052 zpool_print_unsup_feat(nv); 2053 if (nvlist_exists(nvinfo, 2054 ZPOOL_CONFIG_CAN_RDONLY)) { 2055 (void) printf(dgettext(TEXT_DOMAIN, 2056 "All unsupported features are only " 2057 "required for writing to the pool." 2058 "\nThe pool can be imported using " 2059 "'-o readonly=on'.\n")); 2060 } 2061 } 2062 /* 2063 * Unsupported version. 
2064 */ 2065 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 2066 break; 2067 2068 case EREMOTEIO: 2069 if (nv != NULL && nvlist_lookup_nvlist(nv, 2070 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) { 2071 char *hostname = "<unknown>"; 2072 uint64_t hostid = 0; 2073 mmp_state_t mmp_state; 2074 2075 mmp_state = fnvlist_lookup_uint64(nvinfo, 2076 ZPOOL_CONFIG_MMP_STATE); 2077 2078 if (nvlist_exists(nvinfo, 2079 ZPOOL_CONFIG_MMP_HOSTNAME)) 2080 hostname = fnvlist_lookup_string(nvinfo, 2081 ZPOOL_CONFIG_MMP_HOSTNAME); 2082 2083 if (nvlist_exists(nvinfo, 2084 ZPOOL_CONFIG_MMP_HOSTID)) 2085 hostid = fnvlist_lookup_uint64(nvinfo, 2086 ZPOOL_CONFIG_MMP_HOSTID); 2087 2088 if (mmp_state == MMP_STATE_ACTIVE) { 2089 (void) snprintf(aux, sizeof (aux), 2090 dgettext(TEXT_DOMAIN, "pool is imp" 2091 "orted on host '%s' (hostid=%lx).\n" 2092 "Export the pool on the other " 2093 "system, then run 'zpool import'."), 2094 hostname, (unsigned long) hostid); 2095 } else if (mmp_state == MMP_STATE_NO_HOSTID) { 2096 (void) snprintf(aux, sizeof (aux), 2097 dgettext(TEXT_DOMAIN, "pool has " 2098 "the multihost property on and " 2099 "the\nsystem's hostid is not set. 
" 2100 "Set a unique system hostid with " 2101 "the zgenhostid(8) command.\n")); 2102 } 2103 2104 (void) zfs_error_aux(hdl, aux); 2105 } 2106 (void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc); 2107 break; 2108 2109 case EINVAL: 2110 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 2111 break; 2112 2113 case EROFS: 2114 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2115 "one or more devices is read only")); 2116 (void) zfs_error(hdl, EZFS_BADDEV, desc); 2117 break; 2118 2119 case ENXIO: 2120 if (nv && nvlist_lookup_nvlist(nv, 2121 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 2122 nvlist_lookup_nvlist(nvinfo, 2123 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 2124 (void) printf(dgettext(TEXT_DOMAIN, 2125 "The devices below are missing or " 2126 "corrupted, use '-m' to import the pool " 2127 "anyway:\n")); 2128 print_vdev_tree(hdl, NULL, missing, 2); 2129 (void) printf("\n"); 2130 } 2131 (void) zpool_standard_error(hdl, error, desc); 2132 break; 2133 2134 case EEXIST: 2135 (void) zpool_standard_error(hdl, error, desc); 2136 break; 2137 2138 case EBUSY: 2139 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2140 "one or more devices are already in use\n")); 2141 (void) zfs_error(hdl, EZFS_BADDEV, desc); 2142 break; 2143 case ENAMETOOLONG: 2144 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2145 "new name of at least one dataset is longer than " 2146 "the maximum allowable length")); 2147 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc); 2148 break; 2149 default: 2150 (void) zpool_standard_error(hdl, error, desc); 2151 zpool_explain_recover(hdl, 2152 newname ? origname : thename, -error, nv); 2153 break; 2154 } 2155 2156 nvlist_free(nv); 2157 ret = -1; 2158 } else { 2159 zpool_handle_t *zhp; 2160 2161 /* 2162 * This should never fail, but play it safe anyway. 2163 */ 2164 if (zpool_open_silent(hdl, thename, &zhp) != 0) 2165 ret = -1; 2166 else if (zhp != NULL) 2167 zpool_close(zhp); 2168 if (policy.zlp_rewind & 2169 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 2170 zpool_rewind_exclaim(hdl, newname ? 
origname : thename, 2171 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv); 2172 } 2173 nvlist_free(nv); 2174 return (0); 2175 } 2176 2177 return (ret); 2178 } 2179 2180 /* 2181 * Translate vdev names to guids. If a vdev_path is determined to be 2182 * unsuitable then a vd_errlist is allocated and the vdev path and errno 2183 * are added to it. 2184 */ 2185 static int 2186 zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds, 2187 nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist) 2188 { 2189 nvlist_t *errlist = NULL; 2190 int error = 0; 2191 2192 for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL; 2193 elem = nvlist_next_nvpair(vds, elem)) { 2194 boolean_t spare, cache; 2195 2196 char *vd_path = nvpair_name(elem); 2197 nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache, 2198 NULL); 2199 2200 if ((tgt == NULL) || cache || spare) { 2201 if (errlist == NULL) { 2202 errlist = fnvlist_alloc(); 2203 error = EINVAL; 2204 } 2205 2206 uint64_t err = (tgt == NULL) ? EZFS_NODEVICE : 2207 (spare ? 
EZFS_ISSPARE : EZFS_ISL2CACHE); 2208 fnvlist_add_int64(errlist, vd_path, err); 2209 continue; 2210 } 2211 2212 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 2213 fnvlist_add_uint64(vdev_guids, vd_path, guid); 2214 2215 char msg[MAXNAMELEN]; 2216 (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid); 2217 fnvlist_add_string(guids_to_paths, msg, vd_path); 2218 } 2219 2220 if (error != 0) { 2221 verify(errlist != NULL); 2222 if (vd_errlist != NULL) 2223 *vd_errlist = errlist; 2224 else 2225 fnvlist_free(errlist); 2226 } 2227 2228 return (error); 2229 } 2230 2231 static int 2232 xlate_init_err(int err) 2233 { 2234 switch (err) { 2235 case ENODEV: 2236 return (EZFS_NODEVICE); 2237 case EINVAL: 2238 case EROFS: 2239 return (EZFS_BADDEV); 2240 case EBUSY: 2241 return (EZFS_INITIALIZING); 2242 case ESRCH: 2243 return (EZFS_NO_INITIALIZE); 2244 } 2245 return (err); 2246 } 2247 2248 /* 2249 * Begin, suspend, or cancel the initialization (initializing of all free 2250 * blocks) for the given vdevs in the given pool. 
2251 */ 2252 static int 2253 zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type, 2254 nvlist_t *vds, boolean_t wait) 2255 { 2256 int err; 2257 2258 nvlist_t *vdev_guids = fnvlist_alloc(); 2259 nvlist_t *guids_to_paths = fnvlist_alloc(); 2260 nvlist_t *vd_errlist = NULL; 2261 nvlist_t *errlist; 2262 nvpair_t *elem; 2263 2264 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids, 2265 guids_to_paths, &vd_errlist); 2266 2267 if (err != 0) { 2268 verify(vd_errlist != NULL); 2269 goto list_errors; 2270 } 2271 2272 err = lzc_initialize(zhp->zpool_name, cmd_type, 2273 vdev_guids, &errlist); 2274 2275 if (err != 0) { 2276 if (errlist != NULL) { 2277 vd_errlist = fnvlist_lookup_nvlist(errlist, 2278 ZPOOL_INITIALIZE_VDEVS); 2279 goto list_errors; 2280 } 2281 (void) zpool_standard_error(zhp->zpool_hdl, err, 2282 dgettext(TEXT_DOMAIN, "operation failed")); 2283 goto out; 2284 } 2285 2286 if (wait) { 2287 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL; 2288 elem = nvlist_next_nvpair(vdev_guids, elem)) { 2289 2290 uint64_t guid = fnvpair_value_uint64(elem); 2291 2292 err = lzc_wait_tag(zhp->zpool_name, 2293 ZPOOL_WAIT_INITIALIZE, guid, NULL); 2294 if (err != 0) { 2295 (void) zpool_standard_error_fmt(zhp->zpool_hdl, 2296 err, dgettext(TEXT_DOMAIN, "error " 2297 "waiting for '%s' to initialize"), 2298 nvpair_name(elem)); 2299 2300 goto out; 2301 } 2302 } 2303 } 2304 goto out; 2305 2306 list_errors: 2307 for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL; 2308 elem = nvlist_next_nvpair(vd_errlist, elem)) { 2309 int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem)); 2310 char *path; 2311 2312 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem), 2313 &path) != 0) 2314 path = nvpair_name(elem); 2315 2316 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error, 2317 "cannot initialize '%s'", path); 2318 } 2319 2320 out: 2321 fnvlist_free(vdev_guids); 2322 fnvlist_free(guids_to_paths); 2323 2324 if (vd_errlist != NULL) 2325 
fnvlist_free(vd_errlist); 2326 2327 return (err == 0 ? 0 : -1); 2328 } 2329 2330 int 2331 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type, 2332 nvlist_t *vds) 2333 { 2334 return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE)); 2335 } 2336 2337 int 2338 zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type, 2339 nvlist_t *vds) 2340 { 2341 return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE)); 2342 } 2343 2344 static int 2345 xlate_trim_err(int err) 2346 { 2347 switch (err) { 2348 case ENODEV: 2349 return (EZFS_NODEVICE); 2350 case EINVAL: 2351 case EROFS: 2352 return (EZFS_BADDEV); 2353 case EBUSY: 2354 return (EZFS_TRIMMING); 2355 case ESRCH: 2356 return (EZFS_NO_TRIM); 2357 case EOPNOTSUPP: 2358 return (EZFS_TRIM_NOTSUP); 2359 } 2360 return (err); 2361 } 2362 2363 static int 2364 zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids) 2365 { 2366 int err; 2367 nvpair_t *elem; 2368 2369 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL; 2370 elem = nvlist_next_nvpair(vdev_guids, elem)) { 2371 2372 uint64_t guid = fnvpair_value_uint64(elem); 2373 2374 err = lzc_wait_tag(zhp->zpool_name, 2375 ZPOOL_WAIT_TRIM, guid, NULL); 2376 if (err != 0) { 2377 (void) zpool_standard_error_fmt(zhp->zpool_hdl, 2378 err, dgettext(TEXT_DOMAIN, "error " 2379 "waiting to trim '%s'"), nvpair_name(elem)); 2380 2381 return (err); 2382 } 2383 } 2384 return (0); 2385 } 2386 2387 /* 2388 * Check errlist and report any errors, omitting ones which should be 2389 * suppressed. Returns B_TRUE if any errors were reported. 
2390 */ 2391 static boolean_t 2392 check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags, 2393 nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist) 2394 { 2395 nvpair_t *elem; 2396 boolean_t reported_errs = B_FALSE; 2397 int num_vds = 0; 2398 int num_suppressed_errs = 0; 2399 2400 for (elem = nvlist_next_nvpair(vds, NULL); 2401 elem != NULL; elem = nvlist_next_nvpair(vds, elem)) { 2402 num_vds++; 2403 } 2404 2405 for (elem = nvlist_next_nvpair(errlist, NULL); 2406 elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) { 2407 int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem)); 2408 char *path; 2409 2410 /* 2411 * If only the pool was specified, and it was not a secure 2412 * trim then suppress warnings for individual vdevs which 2413 * do not support trimming. 2414 */ 2415 if (vd_error == EZFS_TRIM_NOTSUP && 2416 trim_flags->fullpool && 2417 !trim_flags->secure) { 2418 num_suppressed_errs++; 2419 continue; 2420 } 2421 2422 reported_errs = B_TRUE; 2423 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem), 2424 &path) != 0) 2425 path = nvpair_name(elem); 2426 2427 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error, 2428 "cannot trim '%s'", path); 2429 } 2430 2431 if (num_suppressed_errs == num_vds) { 2432 (void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 2433 "no devices in pool support trim operations")); 2434 (void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP, 2435 dgettext(TEXT_DOMAIN, "cannot trim"))); 2436 reported_errs = B_TRUE; 2437 } 2438 2439 return (reported_errs); 2440 } 2441 2442 /* 2443 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for 2444 * the given vdevs in the given pool. 
 */
int
zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
    trimflags_t *trim_flags)
{
	int err;
	int retval = 0;

	nvlist_t *vdev_guids = fnvlist_alloc();
	nvlist_t *guids_to_paths = fnvlist_alloc();
	nvlist_t *errlist = NULL;

	/* Map the caller-supplied vdev names to guids (and guids to paths). */
	err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
	    guids_to_paths, &errlist);
	if (err != 0) {
		/* Report per-vdev translation errors; return value unused. */
		check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
		retval = -1;
		goto out;
	}

	err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
	    trim_flags->secure, vdev_guids, &errlist);
	if (err != 0) {
		nvlist_t *vd_errlist;
		if (errlist != NULL && nvlist_lookup_nvlist(errlist,
		    ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
			/*
			 * Per-vdev failures: fail only if something was
			 * actually reported (some errors are suppressed).
			 */
			if (check_trim_errs(zhp, trim_flags, guids_to_paths,
			    vds, vd_errlist)) {
				retval = -1;
				goto out;
			}
		} else {
			/* No per-vdev detail available; report generically. */
			char msg[1024];

			(void) snprintf(msg, sizeof (msg),
			    dgettext(TEXT_DOMAIN, "operation failed"));
			zpool_standard_error(zhp->zpool_hdl, err, msg);
			retval = -1;
			goto out;
		}
	}


	/* Optionally block until the TRIM completes on every vdev. */
	if (trim_flags->wait)
		retval = zpool_trim_wait(zhp, vdev_guids);

out:
	if (errlist != NULL)
		fnvlist_free(errlist);
	fnvlist_free(vdev_guids);
	fnvlist_free(guids_to_paths);
	return (retval);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int err;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;
	zc.zc_flags = cmd;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
		return (0);

	err = errno;

	/* ECANCELED on a scrub means we resumed a paused scrub */
	if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
	    cmd == POOL_SCRUB_NORMAL)
		return (0);

	/* ENOENT here means there was nothing to cancel/pause; not an error */
	if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
		return (0);

	/* Build the operation-specific error message prefix. */
	if (func == POOL_SCAN_SCRUB) {
		if (cmd == POOL_SCRUB_PAUSE) {
			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
			    "cannot pause scrubbing %s"), zc.zc_name);
		} else {
			assert(cmd == POOL_SCRUB_NORMAL);
			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
			    "cannot scrub %s"), zc.zc_name);
		}
	} else if (func == POOL_SCAN_RESILVER) {
		assert(cmd == POOL_SCRUB_NORMAL);
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot restart resilver on %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		/*
		 * NOTE(review): with NDEBUG this assert vanishes and msg is
		 * used uninitialized below -- confirm all callers pass a
		 * known func.
		 */
		assert(!"unexpected result");
	}

	if (err == EBUSY) {
		/*
		 * Distinguish "already scrubbing" from "resilver in
		 * progress" by inspecting the cached scan stats.
		 */
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
		    ps->pss_state == DSS_SCANNING) {
			if (cmd == POOL_SCRUB_PAUSE)
				return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
			else
				return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		} else {
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
		}
	} else if (err == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
		return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, msg));
	} else {
		return (zpool_standard_error(hdl, err, msg));
	}
}

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if its an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		/* Searching by guid: exact 64-bit match against this node. */
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "-part1", or "p1". The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
		 *   is used to check all possible expanded paths.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
				return (nv);

		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(zpool_vdev_is_interior(type));
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			/* Parse the numeric vdev id after the '-'. */
			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	/* No match at this node: recurse into the children, if any. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	/* Also search the spare devices, flagging a hit via *avail_spare. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	/* And the L2ARC devices, flagging a hit via *l2cache. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

/*
 * Given a physical path or guid, find the associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;
	uint64_t guid;
	char *end;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	/* If ppath parses entirely as a number, treat it as a guid. */
	guid = strtoull(ppath, &end, 0);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH,
		    ppath) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

/*
 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
 */
static boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
	    strncmp(name,
	    VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);

	/* dRAID vdevs are interior, but dRAID spares are leaves. */
	if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&
	    !zpool_is_draid_spare(name))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * Given a vdev path, guid, or interior vdev name (e.g. "mirror-1"), find
 * the matching vdev nvlist in the pool config, setting *avail_spare,
 * *l2cache, and optionally *log to describe its class.
 */
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	/* Pick the search key: guid, interior vdev name, or device path. */
	guid = strtoull(path, &end, 0);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID,
		    guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

/*
 * Returns 1 unless the vdev config carries an offline, faulted, or
 * removed marker.
 */
static int
vdev_is_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}

/*
 * Helper function for zpool_get_physpaths().
 * Appends this vdev's phys_path (space-separated after the first) to the
 * caller's buffer, advancing *bytes_written.
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	/* snprintf returning >= the space left means truncation. */
	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}

/*
 * Recursively collect phys_paths of online disks under 'nv' into
 * 'physpath'.  NOTE(review): the final return is EZFS_POOL_INVALARG even
 * after successful appends; callers judge success via *rsz, not the
 * return value -- confirm before relying on the return code.
 */
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_is_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    /* deliberate assignment: children of a spare inherit is_spare */
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}

/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
2930 */ 2931 static int 2932 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2933 { 2934 size_t rsz; 2935 nvlist_t *vdev_root; 2936 nvlist_t **child; 2937 uint_t count; 2938 char *type; 2939 2940 rsz = 0; 2941 2942 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2943 &vdev_root) != 0) 2944 return (EZFS_INVALCONFIG); 2945 2946 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2947 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2948 &child, &count) != 0) 2949 return (EZFS_INVALCONFIG); 2950 2951 /* 2952 * root pool can only have a single top-level vdev. 2953 */ 2954 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1) 2955 return (EZFS_POOL_INVALARG); 2956 2957 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2958 B_FALSE); 2959 2960 /* No online devices */ 2961 if (rsz == 0) 2962 return (EZFS_NODEVICE); 2963 2964 return (0); 2965 } 2966 2967 /* 2968 * Get phys_path for a root pool 2969 * Return 0 on success; non-zero on failure. 2970 */ 2971 int 2972 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2973 { 2974 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2975 phypath_size)); 2976 } 2977 2978 /* 2979 * Convert a vdev path to a GUID. Returns GUID or 0 on error. 2980 * 2981 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it 2982 * if the VDEV is a spare, l2cache, or log device. If they're NULL then 2983 * ignore them. 
2984 */ 2985 static uint64_t 2986 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path, 2987 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log) 2988 { 2989 uint64_t guid; 2990 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE; 2991 nvlist_t *tgt; 2992 2993 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache, 2994 &log)) == NULL) 2995 return (0); 2996 2997 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0); 2998 if (is_spare != NULL) 2999 *is_spare = spare; 3000 if (is_l2cache != NULL) 3001 *is_l2cache = l2cache; 3002 if (is_log != NULL) 3003 *is_log = log; 3004 3005 return (guid); 3006 } 3007 3008 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. */ 3009 uint64_t 3010 zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path) 3011 { 3012 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL)); 3013 } 3014 3015 /* 3016 * Bring the specified vdev online. The 'flags' parameter is a set of the 3017 * ZFS_ONLINE_* flags. 
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	char *pathname;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	int error;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	/* Available (unused) spares can't be brought online. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	/*
	 * Expansion path: requested explicitly or implied by the pool's
	 * autoexpand property, and only if we have a device path.
	 */
	if ((flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
	    nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			const char *fullpath = path;
			char buf[MAXPATHLEN];

			/* Expand short names (e.g. "sda") to a full path. */
			if (path[0] != '/') {
				error = zfs_resolve_shortname(path, buf,
				    sizeof (buf));
				if (error != 0)
					return (zfs_error(hdl, EZFS_NODEVICE,
					    msg));

				fullpath = buf;
			}

			/* Rewrite the partition table to use the new size. */
			error = zpool_relabel_disk(hdl, fullpath, msg);
			if (error != 0)
				return (error);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	/* The kernel reports the resulting vdev state back in zc_cookie. */
	*newstate = zc.zc_cookie;
	return (0);
}

/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	/* Available (unused) spares can't be taken offline. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	/* A temporary offline does not persist across pool export/import. */
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	/* aux carries the reason for the fault (vdev_aux_t). */
	zc.zc_obj = aux;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}

}

/*
 * Mark the given vdev degraded.
3190 */ 3191 int 3192 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 3193 { 3194 zfs_cmd_t zc = {"\0"}; 3195 char msg[1024]; 3196 libzfs_handle_t *hdl = zhp->zpool_hdl; 3197 3198 (void) snprintf(msg, sizeof (msg), 3199 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid); 3200 3201 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3202 zc.zc_guid = guid; 3203 zc.zc_cookie = VDEV_STATE_DEGRADED; 3204 zc.zc_obj = aux; 3205 3206 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 3207 return (0); 3208 3209 return (zpool_standard_error(hdl, errno, msg)); 3210 } 3211 3212 /* 3213 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 3214 * a hot spare. 3215 */ 3216 static boolean_t 3217 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 3218 { 3219 nvlist_t **child; 3220 uint_t c, children; 3221 char *type; 3222 3223 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 3224 &children) == 0) { 3225 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 3226 &type) == 0); 3227 3228 if ((strcmp(type, VDEV_TYPE_SPARE) == 0 || 3229 strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) && 3230 children == 2 && child[which] == tgt) 3231 return (B_TRUE); 3232 3233 for (c = 0; c < children; c++) 3234 if (is_replacing_spare(child[c], tgt, which)) 3235 return (B_TRUE); 3236 } 3237 3238 return (B_FALSE); 3239 } 3240 3241 /* 3242 * Attach new_disk (fully described by nvroot) to old_disk. 3243 * If 'replacing' is specified, the new disk will replace the old one. 
 */
int
zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
    const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* Unused spares and cache devices can't be attach targets. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;
	zc.zc_simple = rebuild;

	/* Sequential rebuild requires the device_rebuild pool feature. */
	if (rebuild &&
	    zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "the loaded zfs module doesn't support device rebuilds"));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}

	/* The caller-supplied nvroot must describe exactly one new device. */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0)
		return (0);

	/* The ioctl failed; map errno to a specific, actionable message. */
	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			} else if (rebuild) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "only mirror and dRAID vdevs support "
				    "sequential reconstruction"));
			} else if (zpool_is_draid_spare(new_disk)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID spares can only replace child "
				    "devices in their parent's dRAID vdev"));
			} else if (version >= SPA_VERSION_MULTI_REPLACE) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
			}
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
		    "or device removal is in progress"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different optimal sector size.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device has a different optimal sector size; use the "
		    "option '-o ashift=N' to override the optimal size"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Detach the specified device.
3417 */ 3418 int 3419 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 3420 { 3421 zfs_cmd_t zc = {"\0"}; 3422 char msg[1024]; 3423 nvlist_t *tgt; 3424 boolean_t avail_spare, l2cache; 3425 libzfs_handle_t *hdl = zhp->zpool_hdl; 3426 3427 (void) snprintf(msg, sizeof (msg), 3428 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 3429 3430 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3431 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3432 NULL)) == NULL) 3433 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3434 3435 if (avail_spare) 3436 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3437 3438 if (l2cache) 3439 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 3440 3441 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3442 3443 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 3444 return (0); 3445 3446 switch (errno) { 3447 3448 case ENOTSUP: 3449 /* 3450 * Can't detach from this type of vdev. 3451 */ 3452 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 3453 "applicable to mirror and replacing vdevs")); 3454 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 3455 break; 3456 3457 case EBUSY: 3458 /* 3459 * There are no other replicas of this device. 3460 */ 3461 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 3462 break; 3463 3464 default: 3465 (void) zpool_standard_error(hdl, errno, msg); 3466 } 3467 3468 return (-1); 3469 } 3470 3471 /* 3472 * Find a mirror vdev in the source nvlist. 3473 * 3474 * The mchild array contains a list of disks in one of the top-level mirrors 3475 * of the source pool. The schild array contains a list of disks that the 3476 * user specified on the command line. We loop over the mchild array to 3477 * see if any entry in the schild array matches. 3478 * 3479 * If a disk in the mchild array is found in the schild array, we return 3480 * the index of that entry. Otherwise we return -1. 
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	/*
	 * Compare every mirror child against every requested split child by
	 * display name; return the index (within mchild) of the first match,
	 * or -1 if none of the requested devices belong to this mirror.
	 */
	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], 0);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], 0);
			boolean_t result = (strcmp(mpath, spath) == 0);

			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}

/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 *
 * 'newname' is the name of the pool to be created from the split-off
 * devices; 'props' (optional) are properties to validate for the new pool;
 * 'flags' selects dry-run and import behavior.  Returns 0 on success,
 * -1 (or a zfs_error code) on failure.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024], *bias;
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers, readonly = B_FALSE;
	/* memory_err stays B_TRUE unless we reach a non-OOM exit path */
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		/* validate properties as if they were supplied at import */
		prop_flags_t flags = { .create = B_FALSE,
		    .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
		(void) nvlist_lookup_uint64(zc_props,
		    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
		if (readonly) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property %s can only be set at import time"),
			    zpool_prop_to_name(ZPOOL_PROP_READONLY));
			return (-1);
		}
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	/* varray collects the vdevs that will form the new pool's tree */
	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		boolean_t is_special = B_FALSE, is_dedup = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 * lastlog remembers where the trailing run of
			 * log/hole vdevs begins so it can be trimmed later.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);

		if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
			/* indirect vdevs are copied through unchanged */
			vdev = child[c];
			if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
				goto out;
			continue;
		} else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		if (nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {
			if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
				is_special = B_TRUE;
			else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
				is_dedup = B_TRUE;
		}
		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;

		if (flags.dryrun != 0) {
			/*
			 * Preserve allocation-bias labels in the dry-run
			 * output so the user sees the vdev class.
			 */
			if (is_dedup == B_TRUE) {
				if (nvlist_add_string(varray[vcount - 1],
				    ZPOOL_CONFIG_ALLOCATION_BIAS,
				    VDEV_ALLOC_BIAS_DEDUP) != 0)
					goto out;
			} else if (is_special == B_TRUE) {
				if (nvlist_add_string(varray[vcount - 1],
				    ZPOOL_CONFIG_ALLOCATION_BIAS,
				    VDEV_ALLOC_BIAS_SPECIAL) != 0)
					goto out;
			}
		}
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		/* we allocated *newroot, so free it again on failure */
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found (trimming trailing log/hole vdevs) */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}

/*
 * Remove the given device.
3758 */ 3759 int 3760 zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3761 { 3762 zfs_cmd_t zc = {"\0"}; 3763 char msg[1024]; 3764 nvlist_t *tgt; 3765 boolean_t avail_spare, l2cache, islog; 3766 libzfs_handle_t *hdl = zhp->zpool_hdl; 3767 uint64_t version; 3768 3769 (void) snprintf(msg, sizeof (msg), 3770 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3771 3772 if (zpool_is_draid_spare(path)) { 3773 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3774 "dRAID spares cannot be removed")); 3775 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3776 } 3777 3778 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3779 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3780 &islog)) == NULL) 3781 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3782 3783 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3784 if (islog && version < SPA_VERSION_HOLES) { 3785 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3786 "pool must be upgraded to support log removal")); 3787 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3788 } 3789 3790 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 3791 3792 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3793 return (0); 3794 3795 switch (errno) { 3796 3797 case EINVAL: 3798 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3799 "invalid config; all top-level vdevs must " 3800 "have the same sector size and not be raidz.")); 3801 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 3802 break; 3803 3804 case EBUSY: 3805 if (islog) { 3806 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3807 "Mount encrypted datasets to replay logs.")); 3808 } else { 3809 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3810 "Pool busy; removal may already be in progress")); 3811 } 3812 (void) zfs_error(hdl, EZFS_BUSY, msg); 3813 break; 3814 3815 case EACCES: 3816 if (islog) { 3817 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3818 "Mount encrypted datasets to replay logs.")); 3819 (void) zfs_error(hdl, EZFS_BUSY, msg); 3820 } else { 3821 (void) 
zpool_standard_error(hdl, errno, msg); 3822 } 3823 break; 3824 3825 default: 3826 (void) zpool_standard_error(hdl, errno, msg); 3827 } 3828 return (-1); 3829 } 3830 3831 int 3832 zpool_vdev_remove_cancel(zpool_handle_t *zhp) 3833 { 3834 zfs_cmd_t zc; 3835 char msg[1024]; 3836 libzfs_handle_t *hdl = zhp->zpool_hdl; 3837 3838 (void) snprintf(msg, sizeof (msg), 3839 dgettext(TEXT_DOMAIN, "cannot cancel removal")); 3840 3841 bzero(&zc, sizeof (zc)); 3842 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3843 zc.zc_cookie = 1; 3844 3845 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3846 return (0); 3847 3848 return (zpool_standard_error(hdl, errno, msg)); 3849 } 3850 3851 int 3852 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path, 3853 uint64_t *sizep) 3854 { 3855 char msg[1024]; 3856 nvlist_t *tgt; 3857 boolean_t avail_spare, l2cache, islog; 3858 libzfs_handle_t *hdl = zhp->zpool_hdl; 3859 3860 (void) snprintf(msg, sizeof (msg), 3861 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"), 3862 path); 3863 3864 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3865 &islog)) == NULL) 3866 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3867 3868 if (avail_spare || l2cache || islog) { 3869 *sizep = 0; 3870 return (0); 3871 } 3872 3873 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) { 3874 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3875 "indirect size not available")); 3876 return (zfs_error(hdl, EINVAL, msg)); 3877 } 3878 return (0); 3879 } 3880 3881 /* 3882 * Clear the errors for the pool, or the particular device if specified. 
3883 */ 3884 int 3885 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3886 { 3887 zfs_cmd_t zc = {"\0"}; 3888 char msg[1024]; 3889 nvlist_t *tgt; 3890 zpool_load_policy_t policy; 3891 boolean_t avail_spare, l2cache; 3892 libzfs_handle_t *hdl = zhp->zpool_hdl; 3893 nvlist_t *nvi = NULL; 3894 int error; 3895 3896 if (path) 3897 (void) snprintf(msg, sizeof (msg), 3898 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3899 path); 3900 else 3901 (void) snprintf(msg, sizeof (msg), 3902 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3903 zhp->zpool_name); 3904 3905 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3906 if (path) { 3907 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3908 &l2cache, NULL)) == NULL) 3909 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3910 3911 /* 3912 * Don't allow error clearing for hot spares. Do allow 3913 * error clearing for l2cache devices. 3914 */ 3915 if (avail_spare) 3916 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3917 3918 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3919 &zc.zc_guid) == 0); 3920 } 3921 3922 zpool_get_load_policy(rewindnvl, &policy); 3923 zc.zc_cookie = policy.zlp_rewind; 3924 3925 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3926 return (-1); 3927 3928 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 3929 return (-1); 3930 3931 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3932 errno == ENOMEM) { 3933 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3934 zcmd_free_nvlists(&zc); 3935 return (-1); 3936 } 3937 } 3938 3939 if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) && 3940 errno != EPERM && errno != EACCES)) { 3941 if (policy.zlp_rewind & 3942 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3943 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3944 zpool_rewind_exclaim(hdl, zc.zc_name, 3945 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), 3946 nvi); 3947 nvlist_free(nvi); 3948 } 3949 zcmd_free_nvlists(&zc); 3950 return (0); 
3951 } 3952 3953 zcmd_free_nvlists(&zc); 3954 return (zpool_standard_error(hdl, errno, msg)); 3955 } 3956 3957 /* 3958 * Similar to zpool_clear(), but takes a GUID (used by fmd). 3959 */ 3960 int 3961 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3962 { 3963 zfs_cmd_t zc = {"\0"}; 3964 char msg[1024]; 3965 libzfs_handle_t *hdl = zhp->zpool_hdl; 3966 3967 (void) snprintf(msg, sizeof (msg), 3968 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3969 (u_longlong_t)guid); 3970 3971 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3972 zc.zc_guid = guid; 3973 zc.zc_cookie = ZPOOL_NO_REWIND; 3974 3975 if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0) 3976 return (0); 3977 3978 return (zpool_standard_error(hdl, errno, msg)); 3979 } 3980 3981 /* 3982 * Change the GUID for a pool. 3983 */ 3984 int 3985 zpool_reguid(zpool_handle_t *zhp) 3986 { 3987 char msg[1024]; 3988 libzfs_handle_t *hdl = zhp->zpool_hdl; 3989 zfs_cmd_t zc = {"\0"}; 3990 3991 (void) snprintf(msg, sizeof (msg), 3992 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3993 3994 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3995 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3996 return (0); 3997 3998 return (zpool_standard_error(hdl, errno, msg)); 3999 } 4000 4001 /* 4002 * Reopen the pool. 
4003 */ 4004 int 4005 zpool_reopen_one(zpool_handle_t *zhp, void *data) 4006 { 4007 libzfs_handle_t *hdl = zpool_get_handle(zhp); 4008 const char *pool_name = zpool_get_name(zhp); 4009 boolean_t *scrub_restart = data; 4010 int error; 4011 4012 error = lzc_reopen(pool_name, *scrub_restart); 4013 if (error) { 4014 return (zpool_standard_error_fmt(hdl, error, 4015 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name)); 4016 } 4017 4018 return (0); 4019 } 4020 4021 /* call into libzfs_core to execute the sync IOCTL per pool */ 4022 int 4023 zpool_sync_one(zpool_handle_t *zhp, void *data) 4024 { 4025 int ret; 4026 libzfs_handle_t *hdl = zpool_get_handle(zhp); 4027 const char *pool_name = zpool_get_name(zhp); 4028 boolean_t *force = data; 4029 nvlist_t *innvl = fnvlist_alloc(); 4030 4031 fnvlist_add_boolean_value(innvl, "force", *force); 4032 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) { 4033 nvlist_free(innvl); 4034 return (zpool_standard_error_fmt(hdl, ret, 4035 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name)); 4036 } 4037 nvlist_free(innvl); 4038 4039 return (0); 4040 } 4041 4042 #define PATH_BUF_LEN 64 4043 4044 /* 4045 * Given a vdev, return the name to display in iostat. If the vdev has a path, 4046 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 4047 * We also check if this is a whole disk, in which case we strip off the 4048 * trailing 's0' slice name. 4049 * 4050 * This routine is also responsible for identifying when disks have been 4051 * reconfigured in a new location. The kernel will have opened the device by 4052 * devid, but the path will still refer to the old location. To catch this, we 4053 * first do a path -> devid translation (which is fast for the common case). If 4054 * the devid matches, we're done. If not, we do a reverse devid -> path 4055 * translation and issue the appropriate ioctl() to update the path of the vdev. 
4056 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 4057 * of these checks. 4058 */ 4059 char * 4060 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 4061 int name_flags) 4062 { 4063 char *path, *type, *env; 4064 uint64_t value; 4065 char buf[PATH_BUF_LEN]; 4066 char tmpbuf[PATH_BUF_LEN]; 4067 4068 /* 4069 * vdev_name will be "root"/"root-0" for the root vdev, but it is the 4070 * zpool name that will be displayed to the user. 4071 */ 4072 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 4073 if (zhp != NULL && strcmp(type, "root") == 0) 4074 return (zfs_strdup(hdl, zpool_get_name(zhp))); 4075 4076 env = getenv("ZPOOL_VDEV_NAME_PATH"); 4077 if (env && (strtoul(env, NULL, 0) > 0 || 4078 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) 4079 name_flags |= VDEV_NAME_PATH; 4080 4081 env = getenv("ZPOOL_VDEV_NAME_GUID"); 4082 if (env && (strtoul(env, NULL, 0) > 0 || 4083 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) 4084 name_flags |= VDEV_NAME_GUID; 4085 4086 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS"); 4087 if (env && (strtoul(env, NULL, 0) > 0 || 4088 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) 4089 name_flags |= VDEV_NAME_FOLLOW_LINKS; 4090 4091 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 || 4092 name_flags & VDEV_NAME_GUID) { 4093 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value); 4094 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value); 4095 path = buf; 4096 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 4097 if (name_flags & VDEV_NAME_FOLLOW_LINKS) { 4098 char *rp = realpath(path, NULL); 4099 if (rp) { 4100 strlcpy(buf, rp, sizeof (buf)); 4101 path = buf; 4102 free(rp); 4103 } 4104 } 4105 4106 /* 4107 * For a block device only use the name. 
4108 */ 4109 if ((strcmp(type, VDEV_TYPE_DISK) == 0) && 4110 !(name_flags & VDEV_NAME_PATH)) { 4111 path = zfs_strip_path(path); 4112 } 4113 4114 /* 4115 * Remove the partition from the path if this is a whole disk. 4116 */ 4117 if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 && 4118 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value) 4119 == 0 && value && !(name_flags & VDEV_NAME_PATH)) { 4120 return (zfs_strip_partition(path)); 4121 } 4122 } else { 4123 path = type; 4124 4125 /* 4126 * If it's a raidz device, we need to stick in the parity level. 4127 */ 4128 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 4129 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 4130 &value) == 0); 4131 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 4132 (u_longlong_t)value); 4133 path = buf; 4134 } 4135 4136 /* 4137 * If it's a dRAID device, we add parity, groups, and spares. 4138 */ 4139 if (strcmp(path, VDEV_TYPE_DRAID) == 0) { 4140 uint64_t ndata, nparity, nspares; 4141 nvlist_t **child; 4142 uint_t children; 4143 4144 verify(nvlist_lookup_nvlist_array(nv, 4145 ZPOOL_CONFIG_CHILDREN, &child, &children) == 0); 4146 verify(nvlist_lookup_uint64(nv, 4147 ZPOOL_CONFIG_NPARITY, &nparity) == 0); 4148 verify(nvlist_lookup_uint64(nv, 4149 ZPOOL_CONFIG_DRAID_NDATA, &ndata) == 0); 4150 verify(nvlist_lookup_uint64(nv, 4151 ZPOOL_CONFIG_DRAID_NSPARES, &nspares) == 0); 4152 4153 path = zpool_draid_name(buf, sizeof (buf), ndata, 4154 nparity, nspares, children); 4155 } 4156 4157 /* 4158 * We identify each top-level vdev by using a <type-id> 4159 * naming convention. 
4160 */ 4161 if (name_flags & VDEV_NAME_TYPE_ID) { 4162 uint64_t id; 4163 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 4164 &id) == 0); 4165 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu", 4166 path, (u_longlong_t)id); 4167 path = tmpbuf; 4168 } 4169 } 4170 4171 return (zfs_strdup(hdl, path)); 4172 } 4173 4174 static int 4175 zbookmark_mem_compare(const void *a, const void *b) 4176 { 4177 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 4178 } 4179 4180 /* 4181 * Retrieve the persistent error log, uniquify the members, and return to the 4182 * caller. 4183 */ 4184 int 4185 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 4186 { 4187 zfs_cmd_t zc = {"\0"}; 4188 libzfs_handle_t *hdl = zhp->zpool_hdl; 4189 uint64_t count; 4190 zbookmark_phys_t *zb = NULL; 4191 int i; 4192 4193 /* 4194 * Retrieve the raw error list from the kernel. If the number of errors 4195 * has increased, allocate more space and continue until we get the 4196 * entire list. 4197 */ 4198 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 4199 &count) == 0); 4200 if (count == 0) 4201 return (0); 4202 zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 4203 count * sizeof (zbookmark_phys_t)); 4204 zc.zc_nvlist_dst_size = count; 4205 (void) strcpy(zc.zc_name, zhp->zpool_name); 4206 for (;;) { 4207 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG, 4208 &zc) != 0) { 4209 free((void *)(uintptr_t)zc.zc_nvlist_dst); 4210 if (errno == ENOMEM) { 4211 void *dst; 4212 4213 count = zc.zc_nvlist_dst_size; 4214 dst = zfs_alloc(zhp->zpool_hdl, count * 4215 sizeof (zbookmark_phys_t)); 4216 zc.zc_nvlist_dst = (uintptr_t)dst; 4217 } else { 4218 return (zpool_standard_error_fmt(hdl, errno, 4219 dgettext(TEXT_DOMAIN, "errors: List of " 4220 "errors unavailable"))); 4221 } 4222 } else { 4223 break; 4224 } 4225 } 4226 4227 /* 4228 * Sort the resulting bookmarks. This is a little confusing due to the 4229 * implementation of ZFS_IOC_ERROR_LOG. 
The bookmarks are copied last 4230 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks 4231 * _not_ copied as part of the process. So we point the start of our 4232 * array appropriate and decrement the total number of elements. 4233 */ 4234 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) + 4235 zc.zc_nvlist_dst_size; 4236 count -= zc.zc_nvlist_dst_size; 4237 4238 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare); 4239 4240 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 4241 4242 /* 4243 * Fill in the nverrlistp with nvlist's of dataset and object numbers. 4244 */ 4245 for (i = 0; i < count; i++) { 4246 nvlist_t *nv; 4247 4248 /* ignoring zb_blkid and zb_level for now */ 4249 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 4250 zb[i-1].zb_object == zb[i].zb_object) 4251 continue; 4252 4253 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 4254 goto nomem; 4255 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 4256 zb[i].zb_objset) != 0) { 4257 nvlist_free(nv); 4258 goto nomem; 4259 } 4260 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 4261 zb[i].zb_object) != 0) { 4262 nvlist_free(nv); 4263 goto nomem; 4264 } 4265 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 4266 nvlist_free(nv); 4267 goto nomem; 4268 } 4269 nvlist_free(nv); 4270 } 4271 4272 free((void *)(uintptr_t)zc.zc_nvlist_dst); 4273 return (0); 4274 4275 nomem: 4276 free((void *)(uintptr_t)zc.zc_nvlist_dst); 4277 return (no_memory(zhp->zpool_hdl)); 4278 } 4279 4280 /* 4281 * Upgrade a ZFS pool to the latest on-disk version. 
4282 */ 4283 int 4284 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 4285 { 4286 zfs_cmd_t zc = {"\0"}; 4287 libzfs_handle_t *hdl = zhp->zpool_hdl; 4288 4289 (void) strcpy(zc.zc_name, zhp->zpool_name); 4290 zc.zc_cookie = new_version; 4291 4292 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 4293 return (zpool_standard_error_fmt(hdl, errno, 4294 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 4295 zhp->zpool_name)); 4296 return (0); 4297 } 4298 4299 void 4300 zfs_save_arguments(int argc, char **argv, char *string, int len) 4301 { 4302 int i; 4303 4304 (void) strlcpy(string, basename(argv[0]), len); 4305 for (i = 1; i < argc; i++) { 4306 (void) strlcat(string, " ", len); 4307 (void) strlcat(string, argv[i], len); 4308 } 4309 } 4310 4311 int 4312 zpool_log_history(libzfs_handle_t *hdl, const char *message) 4313 { 4314 zfs_cmd_t zc = {"\0"}; 4315 nvlist_t *args; 4316 int err; 4317 4318 args = fnvlist_alloc(); 4319 fnvlist_add_string(args, "message", message); 4320 err = zcmd_write_src_nvlist(hdl, &zc, args); 4321 if (err == 0) 4322 err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc); 4323 nvlist_free(args); 4324 zcmd_free_nvlists(&zc); 4325 return (err); 4326 } 4327 4328 /* 4329 * Perform ioctl to get some command history of a pool. 4330 * 4331 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 4332 * logical offset of the history buffer to start reading from. 4333 * 4334 * Upon return, 'off' is the next logical offset to read from and 4335 * 'len' is the actual amount of bytes read into 'buf'. 
4336 */ 4337 static int 4338 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 4339 { 4340 zfs_cmd_t zc = {"\0"}; 4341 libzfs_handle_t *hdl = zhp->zpool_hdl; 4342 4343 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4344 4345 zc.zc_history = (uint64_t)(uintptr_t)buf; 4346 zc.zc_history_len = *len; 4347 zc.zc_history_offset = *off; 4348 4349 if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 4350 switch (errno) { 4351 case EPERM: 4352 return (zfs_error_fmt(hdl, EZFS_PERM, 4353 dgettext(TEXT_DOMAIN, 4354 "cannot show history for pool '%s'"), 4355 zhp->zpool_name)); 4356 case ENOENT: 4357 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 4358 dgettext(TEXT_DOMAIN, "cannot get history for pool " 4359 "'%s'"), zhp->zpool_name)); 4360 case ENOTSUP: 4361 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 4362 dgettext(TEXT_DOMAIN, "cannot get history for pool " 4363 "'%s', pool must be upgraded"), zhp->zpool_name)); 4364 default: 4365 return (zpool_standard_error_fmt(hdl, errno, 4366 dgettext(TEXT_DOMAIN, 4367 "cannot get history for '%s'"), zhp->zpool_name)); 4368 } 4369 } 4370 4371 *len = zc.zc_history_len; 4372 *off = zc.zc_history_offset; 4373 4374 return (0); 4375 } 4376 4377 /* 4378 * Retrieve the command history of a pool. 
4379 */ 4380 int 4381 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off, 4382 boolean_t *eof) 4383 { 4384 char *buf; 4385 int buflen = 128 * 1024; 4386 nvlist_t **records = NULL; 4387 uint_t numrecords = 0; 4388 int err, i; 4389 uint64_t start = *off; 4390 4391 buf = malloc(buflen); 4392 if (buf == NULL) 4393 return (ENOMEM); 4394 /* process about 1MB a time */ 4395 while (*off - start < 1024 * 1024) { 4396 uint64_t bytes_read = buflen; 4397 uint64_t leftover; 4398 4399 if ((err = get_history(zhp, buf, off, &bytes_read)) != 0) 4400 break; 4401 4402 /* if nothing else was read in, we're at EOF, just return */ 4403 if (!bytes_read) { 4404 *eof = B_TRUE; 4405 break; 4406 } 4407 4408 if ((err = zpool_history_unpack(buf, bytes_read, 4409 &leftover, &records, &numrecords)) != 0) 4410 break; 4411 *off -= leftover; 4412 if (leftover == bytes_read) { 4413 /* 4414 * no progress made, because buffer is not big enough 4415 * to hold this record; resize and retry. 4416 */ 4417 buflen *= 2; 4418 free(buf); 4419 buf = malloc(buflen); 4420 if (buf == NULL) 4421 return (ENOMEM); 4422 } 4423 } 4424 4425 free(buf); 4426 4427 if (!err) { 4428 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 4429 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 4430 records, numrecords) == 0); 4431 } 4432 for (i = 0; i < numrecords; i++) 4433 nvlist_free(records[i]); 4434 free(records); 4435 4436 return (err); 4437 } 4438 4439 /* 4440 * Retrieve the next event given the passed 'zevent_fd' file descriptor. 4441 * If there is a new event available 'nvp' will contain a newly allocated 4442 * nvlist and 'dropped' will be set to the number of missed events since 4443 * the last call to this function. When 'nvp' is set to NULL it indicates 4444 * no new events are available. In either case the function returns 0 and 4445 * it is up to the caller to free 'nvp'. In the case of a fatal error the 4446 * function will return a non-zero value. 
When the function is called in 4447 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed), 4448 * it will not return until a new event is available. 4449 */ 4450 int 4451 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp, 4452 int *dropped, unsigned flags, int zevent_fd) 4453 { 4454 zfs_cmd_t zc = {"\0"}; 4455 int error = 0; 4456 4457 *nvp = NULL; 4458 *dropped = 0; 4459 zc.zc_cleanup_fd = zevent_fd; 4460 4461 if (flags & ZEVENT_NONBLOCK) 4462 zc.zc_guid = ZEVENT_NONBLOCK; 4463 4464 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0) 4465 return (-1); 4466 4467 retry: 4468 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) { 4469 switch (errno) { 4470 case ESHUTDOWN: 4471 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 4472 dgettext(TEXT_DOMAIN, "zfs shutdown")); 4473 goto out; 4474 case ENOENT: 4475 /* Blocking error case should not occur */ 4476 if (!(flags & ZEVENT_NONBLOCK)) 4477 error = zpool_standard_error_fmt(hdl, errno, 4478 dgettext(TEXT_DOMAIN, "cannot get event")); 4479 4480 goto out; 4481 case ENOMEM: 4482 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 4483 error = zfs_error_fmt(hdl, EZFS_NOMEM, 4484 dgettext(TEXT_DOMAIN, "cannot get event")); 4485 goto out; 4486 } else { 4487 goto retry; 4488 } 4489 default: 4490 error = zpool_standard_error_fmt(hdl, errno, 4491 dgettext(TEXT_DOMAIN, "cannot get event")); 4492 goto out; 4493 } 4494 } 4495 4496 error = zcmd_read_dst_nvlist(hdl, &zc, nvp); 4497 if (error != 0) 4498 goto out; 4499 4500 *dropped = (int)zc.zc_cookie; 4501 out: 4502 zcmd_free_nvlists(&zc); 4503 4504 return (error); 4505 } 4506 4507 /* 4508 * Clear all events. 
4509 */ 4510 int 4511 zpool_events_clear(libzfs_handle_t *hdl, int *count) 4512 { 4513 zfs_cmd_t zc = {"\0"}; 4514 char msg[1024]; 4515 4516 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 4517 "cannot clear events")); 4518 4519 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0) 4520 return (zpool_standard_error_fmt(hdl, errno, msg)); 4521 4522 if (count != NULL) 4523 *count = (int)zc.zc_cookie; /* # of events cleared */ 4524 4525 return (0); 4526 } 4527 4528 /* 4529 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for 4530 * the passed zevent_fd file handle. On success zero is returned, 4531 * otherwise -1 is returned and hdl->libzfs_error is set to the errno. 4532 */ 4533 int 4534 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd) 4535 { 4536 zfs_cmd_t zc = {"\0"}; 4537 int error = 0; 4538 4539 zc.zc_guid = eid; 4540 zc.zc_cleanup_fd = zevent_fd; 4541 4542 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) { 4543 switch (errno) { 4544 case ENOENT: 4545 error = zfs_error_fmt(hdl, EZFS_NOENT, 4546 dgettext(TEXT_DOMAIN, "cannot get event")); 4547 break; 4548 4549 case ENOMEM: 4550 error = zfs_error_fmt(hdl, EZFS_NOMEM, 4551 dgettext(TEXT_DOMAIN, "cannot get event")); 4552 break; 4553 4554 default: 4555 error = zpool_standard_error_fmt(hdl, errno, 4556 dgettext(TEXT_DOMAIN, "cannot get event")); 4557 break; 4558 } 4559 } 4560 4561 return (error); 4562 } 4563 4564 static void 4565 zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 4566 char *pathname, size_t len, boolean_t always_unmounted) 4567 { 4568 zfs_cmd_t zc = {"\0"}; 4569 boolean_t mounted = B_FALSE; 4570 char *mntpnt = NULL; 4571 char dsname[ZFS_MAX_DATASET_NAME_LEN]; 4572 4573 if (dsobj == 0) { 4574 /* special case for the MOS */ 4575 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", 4576 (longlong_t)obj); 4577 return; 4578 } 4579 4580 /* get the dataset's name */ 4581 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4582 
zc.zc_obj = dsobj; 4583 if (zfs_ioctl(zhp->zpool_hdl, 4584 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 4585 /* just write out a path of two object numbers */ 4586 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 4587 (longlong_t)dsobj, (longlong_t)obj); 4588 return; 4589 } 4590 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 4591 4592 /* find out if the dataset is mounted */ 4593 mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname, 4594 &mntpnt); 4595 4596 /* get the corrupted object's path */ 4597 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 4598 zc.zc_obj = obj; 4599 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH, 4600 &zc) == 0) { 4601 if (mounted) { 4602 (void) snprintf(pathname, len, "%s%s", mntpnt, 4603 zc.zc_value); 4604 } else { 4605 (void) snprintf(pathname, len, "%s:%s", 4606 dsname, zc.zc_value); 4607 } 4608 } else { 4609 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, 4610 (longlong_t)obj); 4611 } 4612 free(mntpnt); 4613 } 4614 4615 void 4616 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 4617 char *pathname, size_t len) 4618 { 4619 zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE); 4620 } 4621 4622 void 4623 zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 4624 char *pathname, size_t len) 4625 { 4626 zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE); 4627 } 4628 /* 4629 * Wait while the specified activity is in progress in the pool. 
4630 */ 4631 int 4632 zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity) 4633 { 4634 boolean_t missing; 4635 4636 int error = zpool_wait_status(zhp, activity, &missing, NULL); 4637 4638 if (missing) { 4639 (void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT, 4640 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"), 4641 zhp->zpool_name); 4642 return (ENOENT); 4643 } else { 4644 return (error); 4645 } 4646 } 4647 4648 /* 4649 * Wait for the given activity and return the status of the wait (whether or not 4650 * any waiting was done) in the 'waited' parameter. Non-existent pools are 4651 * reported via the 'missing' parameter, rather than by printing an error 4652 * message. This is convenient when this function is called in a loop over a 4653 * long period of time (as it is, for example, by zpool's wait cmd). In that 4654 * scenario, a pool being exported or destroyed should be considered a normal 4655 * event, so we don't want to print an error when we find that the pool doesn't 4656 * exist. 
4657 */ 4658 int 4659 zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity, 4660 boolean_t *missing, boolean_t *waited) 4661 { 4662 int error = lzc_wait(zhp->zpool_name, activity, waited); 4663 *missing = (error == ENOENT); 4664 if (*missing) 4665 return (0); 4666 4667 if (error != 0) { 4668 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error, 4669 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"), 4670 zhp->zpool_name); 4671 } 4672 4673 return (error); 4674 } 4675 4676 int 4677 zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap) 4678 { 4679 int error = lzc_set_bootenv(zhp->zpool_name, envmap); 4680 if (error != 0) { 4681 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error, 4682 dgettext(TEXT_DOMAIN, 4683 "error setting bootenv in pool '%s'"), zhp->zpool_name); 4684 } 4685 4686 return (error); 4687 } 4688 4689 int 4690 zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp) 4691 { 4692 nvlist_t *nvl; 4693 int error; 4694 4695 nvl = NULL; 4696 error = lzc_get_bootenv(zhp->zpool_name, &nvl); 4697 if (error != 0) { 4698 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error, 4699 dgettext(TEXT_DOMAIN, 4700 "error getting bootenv in pool '%s'"), zhp->zpool_name); 4701 } else { 4702 *nvlp = nvl; 4703 } 4704 4705 return (error); 4706 } 4707 4708 /* 4709 * Attempt to read and parse feature file(s) (from "compatibility" property). 4710 * Files contain zpool feature names, comma or whitespace-separated. 4711 * Comments (# character to next newline) are discarded. 4712 * 4713 * Arguments: 4714 * compatibility : string containing feature filenames 4715 * features : either NULL or pointer to array of boolean 4716 * badtoken : either NULL or pointer to char[ZFS_MAXPROPLEN] 4717 * badfile : either NULL or pointer to char[MAXPATHLEN] 4718 * 4719 * compatibility is NULL (unset), "", "off", "legacy", or list of 4720 * comma-separated filenames. 
filenames should either be absolute, 4721 * or relative to: 4722 * 1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or 4723 * 2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d). 4724 * (Unset), "" or "off" => enable all features 4725 * "legacy" => disable all features 4726 * Any feature names read from files which match unames in spa_feature_table 4727 * will have the corresponding boolean set in the features array (if non-NULL). 4728 * If more than one feature set specified, only features present in *all* of 4729 * them will be set. 4730 * 4731 * An unreadable filename will be strlcpy'd to badfile (if non-NULL). 4732 * An unrecognized feature will be strlcpy'd to badtoken (if non-NULL). 4733 * 4734 * Return values: 4735 * ZPOOL_COMPATIBILITY_OK : files read and parsed ok 4736 * ZPOOL_COMPATIBILITY_READERR : file could not be opened / mmap'd 4737 * ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file 4738 * ZPOOL_COMPATIBILITY_BADWORD : file contains invalid feature name 4739 * ZPOOL_COMPATIBILITY_NOFILES : no file names found 4740 */ 4741 zpool_compat_status_t 4742 zpool_load_compat(const char *compatibility, 4743 boolean_t *features, char *badtoken, char *badfile) 4744 { 4745 int sdirfd, ddirfd, featfd; 4746 int i; 4747 struct stat fs; 4748 char *fc; /* mmap of file */ 4749 char *ps, *ls, *ws; /* strtok state */ 4750 char *file, *line, *word; 4751 char filenames[ZFS_MAXPROPLEN]; 4752 int filecount = 0; 4753 4754 /* special cases (unset), "" and "off" => enable all features */ 4755 if (compatibility == NULL || compatibility[0] == '\0' || 4756 strcmp(compatibility, ZPOOL_COMPAT_OFF) == 0) { 4757 if (features != NULL) 4758 for (i = 0; i < SPA_FEATURES; i++) 4759 features[i] = B_TRUE; 4760 return (ZPOOL_COMPATIBILITY_OK); 4761 } 4762 4763 /* Final special case "legacy" => disable all features */ 4764 if (strcmp(compatibility, ZPOOL_COMPAT_LEGACY) == 0) { 4765 if (features != NULL) 4766 for (i = 0; i < SPA_FEATURES; i++) 4767 features[i] = 
B_FALSE; 4768 return (ZPOOL_COMPATIBILITY_OK); 4769 } 4770 4771 /* 4772 * Start with all true; will be ANDed with results from each file 4773 */ 4774 if (features != NULL) 4775 for (i = 0; i < SPA_FEATURES; i++) 4776 features[i] = B_TRUE; 4777 4778 /* 4779 * We ignore errors from the directory open() 4780 * as they're only needed if the filename is relative 4781 * which will be checked during the openat(). 4782 */ 4783 #ifdef O_PATH 4784 sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, O_DIRECTORY | O_PATH); 4785 ddirfd = open(ZPOOL_DATA_COMPAT_D, O_DIRECTORY | O_PATH); 4786 #else 4787 sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, O_DIRECTORY | O_RDONLY); 4788 ddirfd = open(ZPOOL_DATA_COMPAT_D, O_DIRECTORY | O_RDONLY); 4789 #endif 4790 4791 (void) strlcpy(filenames, compatibility, ZFS_MAXPROPLEN); 4792 file = strtok_r(filenames, ",", &ps); 4793 while (file != NULL) { 4794 boolean_t features_local[SPA_FEATURES]; 4795 4796 /* try sysconfdir first, then datadir */ 4797 if ((featfd = openat(sdirfd, file, 0, O_RDONLY)) < 0) 4798 featfd = openat(ddirfd, file, 0, O_RDONLY); 4799 4800 if (featfd < 0 || fstat(featfd, &fs) < 0) { 4801 (void) close(featfd); 4802 (void) close(sdirfd); 4803 (void) close(ddirfd); 4804 if (badfile != NULL) 4805 (void) strlcpy(badfile, file, MAXPATHLEN); 4806 return (ZPOOL_COMPATIBILITY_READERR); 4807 } 4808 4809 /* Too big or too small */ 4810 if (fs.st_size < 1 || fs.st_size > ZPOOL_COMPAT_MAXSIZE) { 4811 (void) close(featfd); 4812 (void) close(sdirfd); 4813 (void) close(ddirfd); 4814 if (badfile != NULL) 4815 (void) strlcpy(badfile, file, MAXPATHLEN); 4816 return (ZPOOL_COMPATIBILITY_BADFILE); 4817 } 4818 4819 /* private mmap() so we can strtok safely */ 4820 fc = (char *)mmap(NULL, fs.st_size, 4821 PROT_READ|PROT_WRITE, MAP_PRIVATE, featfd, 0); 4822 (void) close(featfd); 4823 4824 if (fc < 0) { 4825 (void) close(sdirfd); 4826 (void) close(ddirfd); 4827 if (badfile != NULL) 4828 (void) strlcpy(badfile, file, MAXPATHLEN); 4829 return (ZPOOL_COMPATIBILITY_READERR); 
4830 } 4831 4832 /* Text file sanity check - last char should be newline */ 4833 if (fc[fs.st_size - 1] != '\n') { 4834 (void) munmap((void *) fc, fs.st_size); 4835 (void) close(sdirfd); 4836 (void) close(ddirfd); 4837 if (badfile != NULL) 4838 (void) strlcpy(badfile, file, MAXPATHLEN); 4839 return (ZPOOL_COMPATIBILITY_BADFILE); 4840 } 4841 4842 /* replace with NUL to ensure we have a delimiter */ 4843 fc[fs.st_size - 1] = '\0'; 4844 4845 for (i = 0; i < SPA_FEATURES; i++) 4846 features_local[i] = B_FALSE; 4847 4848 line = strtok_r(fc, "\n", &ls); 4849 while (line != NULL) { 4850 /* discard comments */ 4851 *(strchrnul(line, '#')) = '\0'; 4852 4853 word = strtok_r(line, ", \t", &ws); 4854 while (word != NULL) { 4855 /* Find matching feature name */ 4856 for (i = 0; i < SPA_FEATURES; i++) { 4857 zfeature_info_t *fi = 4858 &spa_feature_table[i]; 4859 if (strcmp(word, fi->fi_uname) == 0) { 4860 features_local[i] = B_TRUE; 4861 break; 4862 } 4863 } 4864 if (i == SPA_FEATURES) { 4865 if (badtoken != NULL) 4866 (void) strlcpy(badtoken, word, 4867 ZFS_MAXPROPLEN); 4868 if (badfile != NULL) 4869 (void) strlcpy(badfile, file, 4870 MAXPATHLEN); 4871 (void) munmap((void *) fc, fs.st_size); 4872 (void) close(sdirfd); 4873 (void) close(ddirfd); 4874 return (ZPOOL_COMPATIBILITY_BADWORD); 4875 } 4876 word = strtok_r(NULL, ", \t", &ws); 4877 } 4878 line = strtok_r(NULL, "\n", &ls); 4879 } 4880 (void) munmap((void *) fc, fs.st_size); 4881 if (features != NULL) { 4882 for (i = 0; i < SPA_FEATURES; i++) 4883 features[i] &= features_local[i]; 4884 } 4885 filecount++; 4886 file = strtok_r(NULL, ",", &ps); 4887 } 4888 (void) close(sdirfd); 4889 (void) close(ddirfd); 4890 if (filecount == 0) 4891 return (ZPOOL_COMPATIBILITY_NOFILES); 4892 return (ZPOOL_COMPATIBILITY_OK); 4893 } 4894