/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 * Copyright (c) 2018 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
 */

#include <ctype.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_sysfs.h>
#include <sys/vdev_disk.h>
#include <dlfcn.h>
#include <libzutil.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static boolean_t zpool_vdev_is_interior(const char *name);

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
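		/*
		 * Each property is stored as a sub-nvlist holding both its
		 * value (ZPROP_VALUE) and its source (ZPROP_SOURCE).
		 */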
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
 * "SUSPENDED", etc).
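 * The string reflects the state of the top-level vdev unless the pool is
 * unavailable or its I/O is suspended.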
 */
const char *
zpool_get_state_str(zpool_handle_t *zhp)
{
	zpool_errata_t errata;
	zpool_status_t status;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	const char *str;

	status = zpool_get_status(zhp, NULL, &errata);

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		str = gettext("FAULTED");
	} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
	    status == ZPOOL_STATUS_IO_FAILURE_MMP) {
		str = gettext("SUSPENDED");
	} else {
		verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		verify(nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
		    == 0);
		str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
	}
	return (str);
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_CHECKPOINT:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

/*
 * Check that the bootfs name is in the same pool as the one it is being set
 * on. Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, const char *bootfs)
{
	int len = strlen(pool);
	if (bootfs[0] == '\0')
		return (B_TRUE);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "feature '%s' unsupported by kernel"),
				    fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled' or 'disabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (!flags.create &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'disabled' at creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname,
			    0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPOOL_PROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (!flags.create && zpool_prop_setonce(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s' can only be set at "
			    "creation time"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (intval != 0 &&
			    (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid, "
				    "only values between %" PRId32 " and "
				    "%" PRId32 " are allowed."),
				    propname, (u_longlong_t)intval,
				    ASHIFT_MIN, ASHIFT_MAX);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value must be a dataset name,
			 * and the dataset must reside in the same pool it is
			 * being set on.
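			 * For example, on pool "tank" values such as "tank"
			 * or "tank/ROOT/default" are accepted, but
			 * "rpool/ROOT/default" is not.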
			 */
			if (!bootfs_name_valid(poolname, strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_MULTIHOST:
			if (get_system_hostid() == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "requires a non-zero system hostid"));
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_DEDUPDITTO:
			printf("Note: property '%s' no longer has "
			    "any effect\n", propname);
			break;

		default:
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set a zpool property: propname=propval.
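 *
 * For example, zpool_set_prop(zhp, "comment", "production pool") validates
 * the pair via zpool_valid_proplist() and then issues
 * ZFS_IOC_POOL_SET_PROPS.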
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
    boolean_t literal)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
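		 * (A single zprop_list_t may be expanded against several
		 * pools in turn, e.g. when iterating over all pools.)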
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed && !literal)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, literal) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}

/*
 * Validate the given pool name, reporting any extended error information
 * through 'hdl' if one is supplied.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
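	 * For example, an existing pool named "spare1" can still be opened,
	 * but a new pool cannot be created or imported under that name.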
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "draid", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
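 * Returns 0 with *ret set to NULL when the pool cannot actually be found,
 * and -1 only on allocation or refresh failure.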
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Check if vdev list contains a special vdev.
 */
static boolean_t
zpool_has_special_vdev(nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (uint_t c = 0; c < children; c++) {
			char *bias;

			if (nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
			    strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
				return (B_TRUE);
			}
		}
	}
	return (B_FALSE);
}

/*
 * Output a dRAID top-level vdev name into the provided buffer.
 */
static char *
zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
    uint64_t spares, uint64_t children)
{
	snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
	    VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
	    (u_longlong_t)children, (u_longlong_t)spares);

	return (name);
}

/*
 * Return B_TRUE if the provided name is a dRAID spare name.
 */
boolean_t
zpool_is_draid_spare(const char *name)
{
	uint64_t spare_id, parity, vdev_id;

	if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
	    (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
	    (u_longlong_t *)&spare_id) == 3) {
		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
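 *
 * A minimal calling sketch (assuming 'nvroot' was already assembled by the
 * caller, e.g. by the zpool command's make_root_vdev()):
 *
 *	if (zpool_create(hdl, "tank", nvroot, NULL, NULL) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));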
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	nvlist_t *hidden_args = NULL;
	uint8_t *wkeydata = NULL;
	uint_t wkeylen = 0;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}

		if (nvlist_exists(zc_fsprops,
		    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
		    !zpool_has_special_vdev(nvroot)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "%s property requires a special vdev"),
			    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
			(void) zfs_error(hdl, EZFS_BADPROP, msg);
			goto create_failed;
		}

		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
		    &wkeydata, &wkeylen) != 0) {
			zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
		if (wkeydata != NULL) {
			if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
				goto create_failed;

			if (nvlist_add_uint8_array(hidden_args, "wkeydata",
			    wkeydata, wkeylen) != 0)
				goto create_failed;

			if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
			    hidden_args) != 0)
				goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);
		nvlist_free(hidden_args);
		if (wkeydata != NULL)
			free(wkeydata);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
1341 * 1342 * NOTE: although zfs_valid_proplist is called earlier, 1343 * this case may have slipped through since the 1344 * pool does not exist yet and it is therefore 1345 * impossible to read properties e.g. max blocksize 1346 * from the pool. 1347 */ 1348 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1349 "record size invalid")); 1350 return (zfs_error(hdl, EZFS_BADPROP, msg)); 1351 1352 case EOVERFLOW: 1353 /* 1354 * This occurs when one of the devices is below 1355 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1356 * device was the problem device since there's no 1357 * reliable way to determine device size from userland. 1358 */ 1359 { 1360 char buf[64]; 1361 1362 zfs_nicebytes(SPA_MINDEVSIZE, buf, 1363 sizeof (buf)); 1364 1365 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1366 "one or more devices is less than the " 1367 "minimum size (%s)"), buf); 1368 } 1369 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1370 1371 case ENOSPC: 1372 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1373 "one or more devices is out of space")); 1374 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1375 1376 default: 1377 return (zpool_standard_error(hdl, errno, msg)); 1378 } 1379 } 1380 1381 create_failed: 1382 zcmd_free_nvlists(&zc); 1383 nvlist_free(zc_props); 1384 nvlist_free(zc_fsprops); 1385 nvlist_free(hidden_args); 1386 if (wkeydata != NULL) 1387 free(wkeydata); 1388 return (ret); 1389 } 1390 1391 /* 1392 * Destroy the given pool. It is up to the caller to ensure that there are no 1393 * datasets left in the pool. 1394 */ 1395 int 1396 zpool_destroy(zpool_handle_t *zhp, const char *log_str) 1397 { 1398 zfs_cmd_t zc = {"\0"}; 1399 zfs_handle_t *zfp = NULL; 1400 libzfs_handle_t *hdl = zhp->zpool_hdl; 1401 char msg[1024]; 1402 1403 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1404 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1405 return (-1); 1406 1407 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1408 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1409 1410 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1411 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1412 "cannot destroy '%s'"), zhp->zpool_name); 1413 1414 if (errno == EROFS) { 1415 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1416 "one or more devices is read only")); 1417 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1418 } else { 1419 (void) zpool_standard_error(hdl, errno, msg); 1420 } 1421 1422 if (zfp) 1423 zfs_close(zfp); 1424 return (-1); 1425 } 1426 1427 if (zfp) { 1428 remove_mountpoint(zfp); 1429 zfs_close(zfp); 1430 } 1431 1432 return (0); 1433 } 1434 1435 /* 1436 * Create a checkpoint in the given pool. 1437 */ 1438 int 1439 zpool_checkpoint(zpool_handle_t *zhp) 1440 { 1441 libzfs_handle_t *hdl = zhp->zpool_hdl; 1442 char msg[1024]; 1443 int error; 1444 1445 error = lzc_pool_checkpoint(zhp->zpool_name); 1446 if (error != 0) { 1447 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1448 "cannot checkpoint '%s'"), zhp->zpool_name); 1449 (void) zpool_standard_error(hdl, error, msg); 1450 return (-1); 1451 } 1452 1453 return (0); 1454 } 1455 1456 /* 1457 * Discard the checkpoint from the given pool. 
 */
int
zpool_discard_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	int error;

	error = lzc_pool_checkpoint_discard(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot discard checkpoint in '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
		return (-1);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EINVAL:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid config; a pool with removing/removed "
			    "vdevs does not support adding raidz vdevs"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system.
 * The caller must ensure that there are no mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. It should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
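 *
 * 'flags' is a bitmask of ZFS_IMPORT_* values (ZFS_IMPORT_NORMAL for a
 * plain import). For example, importing under the original name:
 *
 *	if (zpool_import_props(hdl, config, NULL, NULL,
 *	    ZFS_IMPORT_NORMAL) != 0)
 *		return (-1);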
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	zpool_load_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_load_policy(config, &policy);

	if (error) {
		char desc[1024];
		char aux[256];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg.
		 */
		if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
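			 * This also covers unsupported feature flags, which
			 * were printed above when present.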
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EREMOTEIO:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
				char *hostname = "<unknown>";
				uint64_t hostid = 0;
				mmp_state_t mmp_state;

				mmp_state = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_MMP_STATE);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTNAME))
					hostname = fnvlist_lookup_string(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTNAME);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTID))
					hostid = fnvlist_lookup_uint64(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTID);

				if (mmp_state == MMP_STATE_ACTIVE) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool is "
					    "imported on host '%s' "
					    "(hostid=%lx).\nExport the pool "
					    "on the other system, then run "
					    "'zpool import'."),
					    hostname, (unsigned long) hostid);
				} else if (mmp_state == MMP_STATE_NO_HOSTID) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool has "
					    "the multihost property on and "
					    "the\nsystem's hostid is not set. "
					    "Set a unique system hostid with "
					    "the zgenhostid(8) command.\n"));
				}

				(void) zfs_error_aux(hdl, aux);
			}
			(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing or "
				    "corrupted, use '-m' to import the pool "
				    "anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;
		case ENAMETOOLONG:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
			break;
		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zlp_rewind &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}

/*
 * Translate vdev names to guids. If a vdev_path is determined to be
 * unsuitable then a vd_errlist is allocated and the vdev path and errno
 * are added to it.
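 * On success, each name in 'vds' appears in 'vdev_guids' mapped to its
 * GUID, and 'guids_to_paths' holds the reverse (GUID string to path)
 * mapping.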
2098 */ 2099 static int 2100 zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds, 2101 nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist) 2102 { 2103 nvlist_t *errlist = NULL; 2104 int error = 0; 2105 2106 for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL; 2107 elem = nvlist_next_nvpair(vds, elem)) { 2108 boolean_t spare, cache; 2109 2110 char *vd_path = nvpair_name(elem); 2111 nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache, 2112 NULL); 2113 2114 if ((tgt == NULL) || cache || spare) { 2115 if (errlist == NULL) { 2116 errlist = fnvlist_alloc(); 2117 error = EINVAL; 2118 } 2119 2120 uint64_t err = (tgt == NULL) ? EZFS_NODEVICE : 2121 (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE); 2122 fnvlist_add_int64(errlist, vd_path, err); 2123 continue; 2124 } 2125 2126 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 2127 fnvlist_add_uint64(vdev_guids, vd_path, guid); 2128 2129 char msg[MAXNAMELEN]; 2130 (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid); 2131 fnvlist_add_string(guids_to_paths, msg, vd_path); 2132 } 2133 2134 if (error != 0) { 2135 verify(errlist != NULL); 2136 if (vd_errlist != NULL) 2137 *vd_errlist = errlist; 2138 else 2139 fnvlist_free(errlist); 2140 } 2141 2142 return (error); 2143 } 2144 2145 static int 2146 xlate_init_err(int err) 2147 { 2148 switch (err) { 2149 case ENODEV: 2150 return (EZFS_NODEVICE); 2151 case EINVAL: 2152 case EROFS: 2153 return (EZFS_BADDEV); 2154 case EBUSY: 2155 return (EZFS_INITIALIZING); 2156 case ESRCH: 2157 return (EZFS_NO_INITIALIZE); 2158 } 2159 return (err); 2160 } 2161 2162 /* 2163 * Begin, suspend, or cancel the initialization (initializing of all free 2164 * blocks) for the given vdevs in the given pool. 2165 */ 2166 static int 2167 zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type, 2168 nvlist_t *vds, boolean_t wait) 2169 { 2170 int err; 2171 2172 nvlist_t *vdev_guids = fnvlist_alloc(); 2173 nvlist_t *guids_to_paths = fnvlist_alloc(); 2174 nvlist_t *vd_errlist = NULL; 2175 nvlist_t *errlist; 2176 nvpair_t *elem; 2177 2178 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids, 2179 guids_to_paths, &vd_errlist); 2180 2181 if (err != 0) { 2182 verify(vd_errlist != NULL); 2183 goto list_errors; 2184 } 2185 2186 err = lzc_initialize(zhp->zpool_name, cmd_type, 2187 vdev_guids, &errlist); 2188 2189 if (err != 0) { 2190 if (errlist != NULL) { 2191 vd_errlist = fnvlist_lookup_nvlist(errlist, 2192 ZPOOL_INITIALIZE_VDEVS); 2193 goto list_errors; 2194 } 2195 (void) zpool_standard_error(zhp->zpool_hdl, err, 2196 dgettext(TEXT_DOMAIN, "operation failed")); 2197 goto out; 2198 } 2199 2200 if (wait) { 2201 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL; 2202 elem = nvlist_next_nvpair(vdev_guids, elem)) { 2203 2204 uint64_t guid = fnvpair_value_uint64(elem); 2205 2206 err = lzc_wait_tag(zhp->zpool_name, 2207 ZPOOL_WAIT_INITIALIZE, guid, NULL); 2208 if (err != 0) { 2209 (void) zpool_standard_error_fmt(zhp->zpool_hdl, 2210 err, dgettext(TEXT_DOMAIN, "error " 2211 "waiting for '%s' to initialize"), 2212 nvpair_name(elem)); 2213 2214 goto out; 2215 } 2216 } 2217 } 2218 goto out; 2219 2220 list_errors: 2221 for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL; 2222 elem = nvlist_next_nvpair(vd_errlist, elem)) { 2223 int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem)); 2224 char *path; 2225 2226 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem), 2227 &path) != 0) 2228 path = nvpair_name(elem); 2229 2230 (void) 
zfs_error_fmt(zhp->zpool_hdl, vd_error, 2231 dgettext(TEXT_DOMAIN, "cannot initialize '%s'"), path); 2232 } 2233 2234 out: 2235 fnvlist_free(vdev_guids); 2236 fnvlist_free(guids_to_paths); 2237 2238 if (vd_errlist != NULL) 2239 fnvlist_free(vd_errlist); 2240 2241 return (err == 0 ? 0 : -1); 2242 } 2243 2244 int 2245 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type, 2246 nvlist_t *vds) 2247 { 2248 return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE)); 2249 } 2250 2251 int 2252 zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type, 2253 nvlist_t *vds) 2254 { 2255 return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE)); 2256 } 2257 2258 static int 2259 xlate_trim_err(int err) 2260 { 2261 switch (err) { 2262 case ENODEV: 2263 return (EZFS_NODEVICE); 2264 case EINVAL: 2265 case EROFS: 2266 return (EZFS_BADDEV); 2267 case EBUSY: 2268 return (EZFS_TRIMMING); 2269 case ESRCH: 2270 return (EZFS_NO_TRIM); 2271 case EOPNOTSUPP: 2272 return (EZFS_TRIM_NOTSUP); 2273 } 2274 return (err); 2275 } 2276 2277 static int 2278 zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids) 2279 { 2280 int err; 2281 nvpair_t *elem; 2282 2283 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL; 2284 elem = nvlist_next_nvpair(vdev_guids, elem)) { 2285 2286 uint64_t guid = fnvpair_value_uint64(elem); 2287 2288 err = lzc_wait_tag(zhp->zpool_name, 2289 ZPOOL_WAIT_TRIM, guid, NULL); 2290 if (err != 0) { 2291 (void) zpool_standard_error_fmt(zhp->zpool_hdl, 2292 err, dgettext(TEXT_DOMAIN, "error " 2293 "waiting to trim '%s'"), nvpair_name(elem)); 2294 2295 return (err); 2296 } 2297 } 2298 return (0); 2299 } 2300 2301 /* 2302 * Check errlist and report any errors, omitting ones which should be 2303 * suppressed. Returns B_TRUE if any errors were reported. 2304 */ 2305 static boolean_t 2306 check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags, 2307 nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist) 2308 { 2309 nvpair_t *elem; 2310 boolean_t reported_errs = B_FALSE; 2311 int num_vds = 0; 2312 int num_suppressed_errs = 0; 2313 2314 for (elem = nvlist_next_nvpair(vds, NULL); 2315 elem != NULL; elem = nvlist_next_nvpair(vds, elem)) { 2316 num_vds++; 2317 } 2318 2319 for (elem = nvlist_next_nvpair(errlist, NULL); 2320 elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) { 2321 int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem)); 2322 char *path; 2323 2324 /* 2325 * If only the pool was specified, and it was not a secure 2326 * trim, then suppress warnings for individual vdevs which 2327 * do not support trimming. 2328 */ 2329 if (vd_error == EZFS_TRIM_NOTSUP && 2330 trim_flags->fullpool && 2331 !trim_flags->secure) { 2332 num_suppressed_errs++; 2333 continue; 2334 } 2335 2336 reported_errs = B_TRUE; 2337 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem), 2338 &path) != 0) 2339 path = nvpair_name(elem); 2340 2341 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error, 2342 dgettext(TEXT_DOMAIN, "cannot trim '%s'"), path); 2343 } 2344 2345 if (num_suppressed_errs == num_vds) { 2346 (void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 2347 "no devices in pool support trim operations")); 2348 (void) zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP, 2349 dgettext(TEXT_DOMAIN, "cannot trim")); 2350 reported_errs = B_TRUE; 2351 } 2352 2353 return (reported_errs); 2354 } 2355 2356 /* 2357 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for 2358 * the given vdevs in the given pool.
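 *
 * Illustrative usage (a sketch, not part of this library; the handle
 * 'zhp' and the device name are assumed to exist): start a TRIM of a
 * single device and wait for it to complete.  As in the zpool(8)
 * command, the names of the nvpairs in 'vds' identify the vdevs:
 *
 *	trimflags_t tf = { .fullpool = B_FALSE, .secure = B_FALSE,
 *	    .wait = B_TRUE, .rate = 0 };
 *	nvlist_t *vds = fnvlist_alloc();
 *	fnvlist_add_boolean(vds, "/dev/sda1");
 *	if (zpool_trim(zhp, POOL_TRIM_START, vds, &tf) != 0)
 *		(void) fprintf(stderr, "trim failed\n");
 *	fnvlist_free(vds);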
2359 */ 2360 int 2361 zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds, 2362 trimflags_t *trim_flags) 2363 { 2364 int err; 2365 int retval = 0; 2366 2367 nvlist_t *vdev_guids = fnvlist_alloc(); 2368 nvlist_t *guids_to_paths = fnvlist_alloc(); 2369 nvlist_t *errlist = NULL; 2370 2371 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids, 2372 guids_to_paths, &errlist); 2373 if (err != 0) { 2374 check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist); 2375 retval = -1; 2376 goto out; 2377 } 2378 2379 err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate, 2380 trim_flags->secure, vdev_guids, &errlist); 2381 if (err != 0) { 2382 nvlist_t *vd_errlist; 2383 if (errlist != NULL && nvlist_lookup_nvlist(errlist, 2384 ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) { 2385 if (check_trim_errs(zhp, trim_flags, guids_to_paths, 2386 vds, vd_errlist)) { 2387 retval = -1; 2388 goto out; 2389 } 2390 } else { 2391 char msg[1024]; 2392 2393 (void) snprintf(msg, sizeof (msg), 2394 dgettext(TEXT_DOMAIN, "operation failed")); 2395 zpool_standard_error(zhp->zpool_hdl, err, msg); 2396 retval = -1; 2397 goto out; 2398 } 2399 } 2400 2401 2402 if (trim_flags->wait) 2403 retval = zpool_trim_wait(zhp, vdev_guids); 2404 2405 out: 2406 if (errlist != NULL) 2407 fnvlist_free(errlist); 2408 fnvlist_free(vdev_guids); 2409 fnvlist_free(guids_to_paths); 2410 return (retval); 2411 } 2412 2413 /* 2414 * Scan the pool. 2415 */ 2416 int 2417 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd) 2418 { 2419 zfs_cmd_t zc = {"\0"}; 2420 char msg[1024]; 2421 int err; 2422 libzfs_handle_t *hdl = zhp->zpool_hdl; 2423 2424 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2425 zc.zc_cookie = func; 2426 zc.zc_flags = cmd; 2427 2428 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0) 2429 return (0); 2430 2431 err = errno; 2432 2433 /* ECANCELED on a scrub means we resumed a paused scrub */ 2434 if (err == ECANCELED && func == POOL_SCAN_SCRUB && 2435 cmd == POOL_SCRUB_NORMAL) 2436 return (0); 2437 2438 if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL) 2439 return (0); 2440 2441 if (func == POOL_SCAN_SCRUB) { 2442 if (cmd == POOL_SCRUB_PAUSE) { 2443 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2444 "cannot pause scrubbing %s"), zc.zc_name); 2445 } else { 2446 assert(cmd == POOL_SCRUB_NORMAL); 2447 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2448 "cannot scrub %s"), zc.zc_name); 2449 } 2450 } else if (func == POOL_SCAN_RESILVER) { 2451 assert(cmd == POOL_SCRUB_NORMAL); 2452 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2453 "cannot restart resilver on %s"), zc.zc_name); 2454 } else if (func == POOL_SCAN_NONE) { 2455 (void) snprintf(msg, sizeof (msg), 2456 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 2457 zc.zc_name); 2458 } else { 2459 assert(!"unexpected result"); 2460 } 2461 2462 if (err == EBUSY) { 2463 nvlist_t *nvroot; 2464 pool_scan_stat_t *ps = NULL; 2465 uint_t psc; 2466 2467 verify(nvlist_lookup_nvlist(zhp->zpool_config, 2468 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 2469 (void) nvlist_lookup_uint64_array(nvroot, 2470 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 2471 if (ps && ps->pss_func == POOL_SCAN_SCRUB && 2472 ps->pss_state == DSS_SCANNING) { 2473 if (cmd == POOL_SCRUB_PAUSE) 2474 return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg)); 2475 else 2476 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 2477 } else { 2478 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 2479 } 2480 } else if (err == ENOENT) { 
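		/*
		 * ENOENT at this point means there was no scrub or
		 * resilver in progress to pause or cancel.
		 */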
2481 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 2482 } else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) { 2483 return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, msg)); 2484 } else { 2485 return (zpool_standard_error(hdl, err, msg)); 2486 } 2487 } 2488 2489 /* 2490 * Find a vdev that matches the search criteria specified. We use the 2491 * nvpair name to determine how we should look for the device. 2492 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 2493 * spare, but FALSE if it is an INUSE spare. 2494 */ 2495 static nvlist_t * 2496 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare, 2497 boolean_t *l2cache, boolean_t *log) 2498 { 2499 uint_t c, children; 2500 nvlist_t **child; 2501 nvlist_t *ret; 2502 uint64_t is_log; 2503 char *srchkey; 2504 nvpair_t *pair = nvlist_next_nvpair(search, NULL); 2505 2506 /* Nothing to look for */ 2507 if (search == NULL || pair == NULL) 2508 return (NULL); 2509 2510 /* Obtain the key we will use to search */ 2511 srchkey = nvpair_name(pair); 2512 2513 switch (nvpair_type(pair)) { 2514 case DATA_TYPE_UINT64: 2515 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) { 2516 uint64_t srchval, theguid; 2517 2518 verify(nvpair_value_uint64(pair, &srchval) == 0); 2519 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 2520 &theguid) == 0); 2521 if (theguid == srchval) 2522 return (nv); 2523 } 2524 break; 2525 2526 case DATA_TYPE_STRING: { 2527 char *srchval, *val; 2528 2529 verify(nvpair_value_string(pair, &srchval) == 0); 2530 if (nvlist_lookup_string(nv, srchkey, &val) != 0) 2531 break; 2532 2533 /* 2534 * Search for the requested value. Special cases: 2535 * 2536 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in 2537 * "-part1" or "p1". The suffix is hidden from the user, 2538 * but included in the string, so this matches around it. 2539 * - ZPOOL_CONFIG_PATH for short names: zfs_strcmp_shortname() 2540 * is used to check all possible expanded paths. 2541 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE). 2542 * 2543 * Otherwise, all other searches are simple string compares. 2544 */ 2545 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) { 2546 uint64_t wholedisk = 0; 2547 2548 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 2549 &wholedisk); 2550 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0) 2551 return (nv); 2552 2553 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 2554 char *type, *idx, *end, *p; 2555 uint64_t id, vdev_id; 2556 2557 /* 2558 * Determine our vdev type, keeping in mind 2559 * that the srchval is composed of a type and 2560 * vdev id pair (i.e. mirror-4). 2561 */ 2562 if ((type = strdup(srchval)) == NULL) 2563 return (NULL); 2564 2565 if ((p = strrchr(type, '-')) == NULL) { 2566 free(type); 2567 break; 2568 } 2569 idx = p + 1; 2570 *p = '\0'; 2571 2572 /* 2573 * If the types don't match then keep looking. 2574 */ 2575 if (strncmp(val, type, strlen(val)) != 0) { 2576 free(type); 2577 break; 2578 } 2579 2580 verify(zpool_vdev_is_interior(type)); 2581 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 2582 &id) == 0); 2583 2584 errno = 0; 2585 vdev_id = strtoull(idx, &end, 10); 2586 2587 free(type); 2588 if (errno != 0) 2589 return (NULL); 2590 2591 /* 2592 * Now verify that we have the correct vdev id.
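			 * (e.g. for a srchval of "mirror-4", the 4 parsed
			 * from the name must match this vdev's
			 * ZPOOL_CONFIG_ID).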
2593 */ 2594 if (vdev_id == id) 2595 return (nv); 2596 } 2597 2598 /* 2599 * Common case 2600 */ 2601 if (strcmp(srchval, val) == 0) 2602 return (nv); 2603 break; 2604 } 2605 2606 default: 2607 break; 2608 } 2609 2610 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2611 &child, &children) != 0) 2612 return (NULL); 2613 2614 for (c = 0; c < children; c++) { 2615 if ((ret = vdev_to_nvlist_iter(child[c], search, 2616 avail_spare, l2cache, NULL)) != NULL) { 2617 /* 2618 * The 'is_log' value is only set for the toplevel 2619 * vdev, not the leaf vdevs. So we always lookup the 2620 * log device from the root of the vdev tree (where 2621 * 'log' is non-NULL). 2622 */ 2623 if (log != NULL && 2624 nvlist_lookup_uint64(child[c], 2625 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 2626 is_log) { 2627 *log = B_TRUE; 2628 } 2629 return (ret); 2630 } 2631 } 2632 2633 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2634 &child, &children) == 0) { 2635 for (c = 0; c < children; c++) { 2636 if ((ret = vdev_to_nvlist_iter(child[c], search, 2637 avail_spare, l2cache, NULL)) != NULL) { 2638 *avail_spare = B_TRUE; 2639 return (ret); 2640 } 2641 } 2642 } 2643 2644 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2645 &child, &children) == 0) { 2646 for (c = 0; c < children; c++) { 2647 if ((ret = vdev_to_nvlist_iter(child[c], search, 2648 avail_spare, l2cache, NULL)) != NULL) { 2649 *l2cache = B_TRUE; 2650 return (ret); 2651 } 2652 } 2653 } 2654 2655 return (NULL); 2656 } 2657 2658 /* 2659 * Given a physical path or guid, find the associated vdev. 2660 */ 2661 nvlist_t * 2662 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 2663 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 2664 { 2665 nvlist_t *search, *nvroot, *ret; 2666 uint64_t guid; 2667 char *end; 2668 2669 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2670 2671 guid = strtoull(ppath, &end, 0); 2672 if (guid != 0 && *end == '\0') { 2673 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2674 } else { 2675 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, 2676 ppath) == 0); 2677 } 2678 2679 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2680 &nvroot) == 0); 2681 2682 *avail_spare = B_FALSE; 2683 *l2cache = B_FALSE; 2684 if (log != NULL) 2685 *log = B_FALSE; 2686 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2687 nvlist_free(search); 2688 2689 return (ret); 2690 } 2691 2692 /* 2693 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 
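 * Such names are either a bare type ("mirror") or the type-id form
 * reported by zpool status (e.g. "mirror-1", "raidz2-0", "spare-3").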
2694 */ 2695 static boolean_t 2696 zpool_vdev_is_interior(const char *name) 2697 { 2698 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2699 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 || 2700 strncmp(name, 2701 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 || 2702 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2703 return (B_TRUE); 2704 2705 if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 && 2706 !zpool_is_draid_spare(name)) 2707 return (B_TRUE); 2708 2709 return (B_FALSE); 2710 } 2711 2712 nvlist_t * 2713 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2714 boolean_t *l2cache, boolean_t *log) 2715 { 2716 char *end; 2717 nvlist_t *nvroot, *search, *ret; 2718 uint64_t guid; 2719 2720 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2721 2722 guid = strtoull(path, &end, 0); 2723 if (guid != 0 && *end == '\0') { 2724 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2725 } else if (zpool_vdev_is_interior(path)) { 2726 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2727 } else { 2728 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2729 } 2730 2731 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2732 &nvroot) == 0); 2733 2734 *avail_spare = B_FALSE; 2735 *l2cache = B_FALSE; 2736 if (log != NULL) 2737 *log = B_FALSE; 2738 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2739 nvlist_free(search); 2740 2741 return (ret); 2742 } 2743 2744 static int 2745 vdev_is_online(nvlist_t *nv) 2746 { 2747 uint64_t ival; 2748 2749 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2750 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2751 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2752 return (0); 2753 2754 return (1); 2755 } 2756 2757 /* 2758 * Helper function for zpool_get_physpaths(). 2759 */ 2760 static int 2761 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2762 size_t *bytes_written) 2763 { 2764 size_t bytes_left, pos, rsz; 2765 char *tmppath; 2766 const char *format; 2767 2768 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2769 &tmppath) != 0) 2770 return (EZFS_NODEVICE); 2771 2772 pos = *bytes_written; 2773 bytes_left = physpath_size - pos; 2774 format = (pos == 0) ? "%s" : " %s"; 2775 2776 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2777 *bytes_written += rsz; 2778 2779 if (rsz >= bytes_left) { 2780 /* if physpath was not copied properly, clear it */ 2781 if (bytes_left != 0) { 2782 physpath[pos] = 0; 2783 } 2784 return (EZFS_NOSPC); 2785 } 2786 return (0); 2787 } 2788 2789 static int 2790 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2791 size_t *rsz, boolean_t is_spare) 2792 { 2793 char *type; 2794 int ret; 2795 2796 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2797 return (EZFS_INVALCONFIG); 2798 2799 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2800 /* 2801 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2802 * For a spare vdev, we only want to boot from the active 2803 * spare device. 
2804 */ 2805 if (is_spare) { 2806 uint64_t spare = 0; 2807 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2808 &spare); 2809 if (!spare) 2810 return (EZFS_INVALCONFIG); 2811 } 2812 2813 if (vdev_is_online(nv)) { 2814 if ((ret = vdev_get_one_physpath(nv, physpath, 2815 phypath_size, rsz)) != 0) 2816 return (ret); 2817 } 2818 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2819 strcmp(type, VDEV_TYPE_RAIDZ) == 0 || 2820 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2821 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2822 nvlist_t **child; 2823 uint_t count; 2824 int i, ret; 2825 2826 if (nvlist_lookup_nvlist_array(nv, 2827 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2828 return (EZFS_INVALCONFIG); 2829 2830 for (i = 0; i < count; i++) { 2831 ret = vdev_get_physpaths(child[i], physpath, 2832 phypath_size, rsz, is_spare); 2833 if (ret == EZFS_NOSPC) 2834 return (ret); 2835 } 2836 } 2837 2838 return (EZFS_POOL_INVALARG); 2839 } 2840 2841 /* 2842 * Get phys_path for a root pool config. 2843 * Return 0 on success; non-zero on failure. 2844 */ 2845 static int 2846 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2847 { 2848 size_t rsz; 2849 nvlist_t *vdev_root; 2850 nvlist_t **child; 2851 uint_t count; 2852 char *type; 2853 2854 rsz = 0; 2855 2856 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2857 &vdev_root) != 0) 2858 return (EZFS_INVALCONFIG); 2859 2860 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2861 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2862 &child, &count) != 0) 2863 return (EZFS_INVALCONFIG); 2864 2865 /* 2866 * root pool can only have a single top-level vdev. 2867 */ 2868 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1) 2869 return (EZFS_POOL_INVALARG); 2870 2871 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2872 B_FALSE); 2873 2874 /* No online devices */ 2875 if (rsz == 0) 2876 return (EZFS_NODEVICE); 2877 2878 return (0); 2879 } 2880 2881 /* 2882 * Get phys_path for a root pool 2883 * Return 0 on success; non-zero on failure. 2884 */ 2885 int 2886 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2887 { 2888 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2889 phypath_size)); 2890 } 2891 2892 /* 2893 * Convert a vdev path to a GUID. Returns GUID or 0 on error. 2894 * 2895 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it 2896 * if the VDEV is a spare, l2cache, or log device. If they're NULL then 2897 * ignore them. 2898 */ 2899 static uint64_t 2900 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path, 2901 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log) 2902 { 2903 uint64_t guid; 2904 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE; 2905 nvlist_t *tgt; 2906 2907 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache, 2908 &log)) == NULL) 2909 return (0); 2910 2911 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0); 2912 if (is_spare != NULL) 2913 *is_spare = spare; 2914 if (is_l2cache != NULL) 2915 *is_l2cache = l2cache; 2916 if (is_log != NULL) 2917 *is_log = log; 2918 2919 return (guid); 2920 } 2921 2922 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. */ 2923 uint64_t 2924 zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path) 2925 { 2926 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL)); 2927 } 2928 2929 /* 2930 * Bring the specified vdev online. 
The 'flags' parameter is a set of the 2931 * ZFS_ONLINE_* flags. 2932 */ 2933 int 2934 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2935 vdev_state_t *newstate) 2936 { 2937 zfs_cmd_t zc = {"\0"}; 2938 char msg[1024]; 2939 char *pathname; 2940 nvlist_t *tgt; 2941 boolean_t avail_spare, l2cache, islog; 2942 libzfs_handle_t *hdl = zhp->zpool_hdl; 2943 int error; 2944 2945 if (flags & ZFS_ONLINE_EXPAND) { 2946 (void) snprintf(msg, sizeof (msg), 2947 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2948 } else { 2949 (void) snprintf(msg, sizeof (msg), 2950 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2951 } 2952 2953 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2954 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2955 &islog)) == NULL) 2956 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2957 2958 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2959 2960 if (avail_spare) 2961 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2962 2963 if ((flags & ZFS_ONLINE_EXPAND || 2964 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) && 2965 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) { 2966 uint64_t wholedisk = 0; 2967 2968 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2969 &wholedisk); 2970 2971 /* 2972 * XXX - L2ARC 1.0 devices can't support expansion. 2973 */ 2974 if (l2cache) { 2975 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2976 "cannot expand cache devices")); 2977 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2978 } 2979 2980 if (wholedisk) { 2981 const char *fullpath = path; 2982 char buf[MAXPATHLEN]; 2983 2984 if (path[0] != '/') { 2985 error = zfs_resolve_shortname(path, buf, 2986 sizeof (buf)); 2987 if (error != 0) 2988 return (zfs_error(hdl, EZFS_NODEVICE, 2989 msg)); 2990 2991 fullpath = buf; 2992 } 2993 2994 error = zpool_relabel_disk(hdl, fullpath, msg); 2995 if (error != 0) 2996 return (error); 2997 } 2998 } 2999 3000 zc.zc_cookie = VDEV_STATE_ONLINE; 3001 zc.zc_obj = flags; 3002 3003 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 3004 if (errno == EINVAL) { 3005 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 3006 "from this pool into a new one. Use '%s' " 3007 "instead"), "zpool detach"); 3008 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 3009 } 3010 return (zpool_standard_error(hdl, errno, msg)); 3011 } 3012 3013 *newstate = zc.zc_cookie; 3014 return (0); 3015 } 3016 3017 /* 3018 * Take the specified vdev offline 3019 */ 3020 int 3021 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 3022 { 3023 zfs_cmd_t zc = {"\0"}; 3024 char msg[1024]; 3025 nvlist_t *tgt; 3026 boolean_t avail_spare, l2cache; 3027 libzfs_handle_t *hdl = zhp->zpool_hdl; 3028 3029 (void) snprintf(msg, sizeof (msg), 3030 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 3031 3032 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3033 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3034 NULL)) == NULL) 3035 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3036 3037 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3038 3039 if (avail_spare) 3040 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3041 3042 zc.zc_cookie = VDEV_STATE_OFFLINE; 3043 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0; 3044 3045 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 3046 return (0); 3047 3048 switch (errno) { 3049 case EBUSY: 3050 3051 /* 3052 * There are no other replicas of this device. 
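		 * Taking it offline would leave the pool without a
		 * valid replica of the data.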
3053 */ 3054 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 3055 3056 case EEXIST: 3057 /* 3058 * The log device has unplayed logs 3059 */ 3060 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 3061 3062 default: 3063 return (zpool_standard_error(hdl, errno, msg)); 3064 } 3065 } 3066 3067 /* 3068 * Mark the given vdev faulted. 3069 */ 3070 int 3071 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 3072 { 3073 zfs_cmd_t zc = {"\0"}; 3074 char msg[1024]; 3075 libzfs_handle_t *hdl = zhp->zpool_hdl; 3076 3077 (void) snprintf(msg, sizeof (msg), 3078 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid); 3079 3080 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3081 zc.zc_guid = guid; 3082 zc.zc_cookie = VDEV_STATE_FAULTED; 3083 zc.zc_obj = aux; 3084 3085 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 3086 return (0); 3087 3088 switch (errno) { 3089 case EBUSY: 3090 3091 /* 3092 * There are no other replicas of this device. 3093 */ 3094 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 3095 3096 default: 3097 return (zpool_standard_error(hdl, errno, msg)); 3098 } 3099 3100 } 3101 3102 /* 3103 * Mark the given vdev degraded. 3104 */ 3105 int 3106 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 3107 { 3108 zfs_cmd_t zc = {"\0"}; 3109 char msg[1024]; 3110 libzfs_handle_t *hdl = zhp->zpool_hdl; 3111 3112 (void) snprintf(msg, sizeof (msg), 3113 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid); 3114 3115 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3116 zc.zc_guid = guid; 3117 zc.zc_cookie = VDEV_STATE_DEGRADED; 3118 zc.zc_obj = aux; 3119 3120 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 3121 return (0); 3122 3123 return (zpool_standard_error(hdl, errno, msg)); 3124 } 3125 3126 /* 3127 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 3128 * a hot spare. 3129 */ 3130 static boolean_t 3131 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 3132 { 3133 nvlist_t **child; 3134 uint_t c, children; 3135 char *type; 3136 3137 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 3138 &children) == 0) { 3139 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 3140 &type) == 0); 3141 3142 if ((strcmp(type, VDEV_TYPE_SPARE) == 0 || 3143 strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) && 3144 children == 2 && child[which] == tgt) 3145 return (B_TRUE); 3146 3147 for (c = 0; c < children; c++) 3148 if (is_replacing_spare(child[c], tgt, which)) 3149 return (B_TRUE); 3150 } 3151 3152 return (B_FALSE); 3153 } 3154 3155 /* 3156 * Attach new_disk (fully described by nvroot) to old_disk. 3157 * If 'replacing' is specified, the new disk will replace the old one. 
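 *
 * Illustrative call (a sketch; device names are hypothetical and
 * 'nvroot' is assumed to have been built elsewhere, e.g. by the zpool
 * command's vdev-spec parser, with the single new disk as its child).
 * This would replace sda with sdb using a healing, non-sequential
 * resilver:
 *
 *	if (zpool_vdev_attach(zhp, "sda", "sdb", nvroot,
 *	    B_TRUE, B_FALSE) != 0)
 *		return (-1);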
3158 */ 3159 int 3160 zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk, 3161 const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild) 3162 { 3163 zfs_cmd_t zc = {"\0"}; 3164 char msg[1024]; 3165 int ret; 3166 nvlist_t *tgt; 3167 boolean_t avail_spare, l2cache, islog; 3168 uint64_t val; 3169 char *newname; 3170 nvlist_t **child; 3171 uint_t children; 3172 nvlist_t *config_root; 3173 libzfs_handle_t *hdl = zhp->zpool_hdl; 3174 3175 if (replacing) 3176 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 3177 "cannot replace %s with %s"), old_disk, new_disk); 3178 else 3179 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 3180 "cannot attach %s to %s"), new_disk, old_disk); 3181 3182 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3183 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 3184 &islog)) == NULL) 3185 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3186 3187 if (avail_spare) 3188 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3189 3190 if (l2cache) 3191 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 3192 3193 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3194 zc.zc_cookie = replacing; 3195 zc.zc_simple = rebuild; 3196 3197 if (rebuild && 3198 zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) { 3199 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3200 "the loaded zfs module doesn't support device rebuilds")); 3201 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 3202 } 3203 3204 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3205 &child, &children) != 0 || children != 1) { 3206 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3207 "new device must be a single disk")); 3208 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 3209 } 3210 3211 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 3212 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 3213 3214 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL) 3215 return (-1); 3216 3217 /* 3218 * If the target is a hot spare that has been swapped in, we can only 3219 * replace it with another hot spare. 3220 */ 3221 if (replacing && 3222 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 3223 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 3224 NULL) == NULL || !avail_spare) && 3225 is_replacing_spare(config_root, tgt, 1)) { 3226 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3227 "can only be replaced by another hot spare")); 3228 free(newname); 3229 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 3230 } 3231 3232 free(newname); 3233 3234 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 3235 return (-1); 3236 3237 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 3238 3239 zcmd_free_nvlists(&zc); 3240 3241 if (ret == 0) 3242 return (0); 3243 3244 switch (errno) { 3245 case ENOTSUP: 3246 /* 3247 * Can't attach to or replace this type of vdev. 
3248 */ 3249 if (replacing) { 3250 uint64_t version = zpool_get_prop_int(zhp, 3251 ZPOOL_PROP_VERSION, NULL); 3252 3253 if (islog) { 3254 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3255 "cannot replace a log with a spare")); 3256 } else if (rebuild) { 3257 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3258 "only mirror and dRAID vdevs support " 3259 "sequential reconstruction")); 3260 } else if (zpool_is_draid_spare(new_disk)) { 3261 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3262 "dRAID spares can only replace child " 3263 "devices in their parent's dRAID vdev")); 3264 } else if (version >= SPA_VERSION_MULTI_REPLACE) { 3265 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3266 "already in replacing/spare config; wait " 3267 "for completion or use 'zpool detach'")); 3268 } else { 3269 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3270 "cannot replace a replacing device")); 3271 } 3272 } else { 3273 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3274 "can only attach to mirrors and top-level " 3275 "disks")); 3276 } 3277 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 3278 break; 3279 3280 case EINVAL: 3281 /* 3282 * The new device must be a single disk. 3283 */ 3284 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3285 "new device must be a single disk")); 3286 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 3287 break; 3288 3289 case EBUSY: 3290 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, " 3291 "or device removal is in progress"), 3292 new_disk); 3293 (void) zfs_error(hdl, EZFS_BADDEV, msg); 3294 break; 3295 3296 case EOVERFLOW: 3297 /* 3298 * The new device is too small. 3299 */ 3300 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3301 "device is too small")); 3302 (void) zfs_error(hdl, EZFS_BADDEV, msg); 3303 break; 3304 3305 case EDOM: 3306 /* 3307 * The new device has a different optimal sector size. 3308 */ 3309 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3310 "new device has a different optimal sector size; use the " 3311 "option '-o ashift=N' to override the optimal size")); 3312 (void) zfs_error(hdl, EZFS_BADDEV, msg); 3313 break; 3314 3315 case ENAMETOOLONG: 3316 /* 3317 * The resulting top-level vdev spec won't fit in the label. 3318 */ 3319 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 3320 break; 3321 3322 default: 3323 (void) zpool_standard_error(hdl, errno, msg); 3324 } 3325 3326 return (-1); 3327 } 3328 3329 /* 3330 * Detach the specified device. 3331 */ 3332 int 3333 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 3334 { 3335 zfs_cmd_t zc = {"\0"}; 3336 char msg[1024]; 3337 nvlist_t *tgt; 3338 boolean_t avail_spare, l2cache; 3339 libzfs_handle_t *hdl = zhp->zpool_hdl; 3340 3341 (void) snprintf(msg, sizeof (msg), 3342 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 3343 3344 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3345 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3346 NULL)) == NULL) 3347 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3348 3349 if (avail_spare) 3350 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3351 3352 if (l2cache) 3353 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 3354 3355 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3356 3357 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 3358 return (0); 3359 3360 switch (errno) { 3361 3362 case ENOTSUP: 3363 /* 3364 * Can't detach from this type of vdev. 
3365 */ 3366 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 3367 "applicable to mirror and replacing vdevs")); 3368 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 3369 break; 3370 3371 case EBUSY: 3372 /* 3373 * There are no other replicas of this device. 3374 */ 3375 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 3376 break; 3377 3378 default: 3379 (void) zpool_standard_error(hdl, errno, msg); 3380 } 3381 3382 return (-1); 3383 } 3384 3385 /* 3386 * Find a mirror vdev in the source nvlist. 3387 * 3388 * The mchild array contains a list of disks in one of the top-level mirrors 3389 * of the source pool. The schild array contains a list of disks that the 3390 * user specified on the command line. We loop over the mchild array to 3391 * see if any entry in the schild array matches. 3392 * 3393 * If a disk in the mchild array is found in the schild array, we return 3394 * the index of that entry. Otherwise we return -1. 3395 */ 3396 static int 3397 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 3398 nvlist_t **schild, uint_t schildren) 3399 { 3400 uint_t mc; 3401 3402 for (mc = 0; mc < mchildren; mc++) { 3403 uint_t sc; 3404 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 3405 mchild[mc], 0); 3406 3407 for (sc = 0; sc < schildren; sc++) { 3408 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 3409 schild[sc], 0); 3410 boolean_t result = (strcmp(mpath, spath) == 0); 3411 3412 free(spath); 3413 if (result) { 3414 free(mpath); 3415 return (mc); 3416 } 3417 } 3418 3419 free(mpath); 3420 } 3421 3422 return (-1); 3423 } 3424 3425 /* 3426 * Split a mirror pool. If newroot points to null, then a new nvlist 3427 * is generated and it is the responsibility of the caller to free it. 3428 */ 3429 int 3430 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 3431 nvlist_t *props, splitflags_t flags) 3432 { 3433 zfs_cmd_t zc = {"\0"}; 3434 char msg[1024], *bias; 3435 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 3436 nvlist_t **varray = NULL, *zc_props = NULL; 3437 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 3438 libzfs_handle_t *hdl = zhp->zpool_hdl; 3439 uint64_t vers, readonly = B_FALSE; 3440 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 3441 int retval = 0; 3442 3443 (void) snprintf(msg, sizeof (msg), 3444 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 3445 3446 if (!zpool_name_valid(hdl, B_FALSE, newname)) 3447 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 3448 3449 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 3450 (void) fprintf(stderr, gettext("Internal error: unable to " 3451 "retrieve pool configuration\n")); 3452 return (-1); 3453 } 3454 3455 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 3456 == 0); 3457 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 3458 3459 if (props) { 3460 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 3461 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 3462 props, vers, flags, msg)) == NULL) 3463 return (-1); 3464 (void) nvlist_lookup_uint64(zc_props, 3465 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 3466 if (readonly) { 3467 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3468 "property %s can only be set at import time"), 3469 zpool_prop_to_name(ZPOOL_PROP_READONLY)); 3470 return (-1); 3471 } 3472 } 3473 3474 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 3475 &children) != 0) { 3476 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3477 "Source pool is missing vdev 
tree")); 3478 nvlist_free(zc_props); 3479 return (-1); 3480 } 3481 3482 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 3483 vcount = 0; 3484 3485 if (*newroot == NULL || 3486 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 3487 &newchild, &newchildren) != 0) 3488 newchildren = 0; 3489 3490 for (c = 0; c < children; c++) { 3491 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 3492 boolean_t is_special = B_FALSE, is_dedup = B_FALSE; 3493 char *type; 3494 nvlist_t **mchild, *vdev; 3495 uint_t mchildren; 3496 int entry; 3497 3498 /* 3499 * Unlike cache & spares, slogs are stored in the 3500 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 3501 */ 3502 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 3503 &is_log); 3504 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 3505 &is_hole); 3506 if (is_log || is_hole) { 3507 /* 3508 * Create a hole vdev and put it in the config. 3509 */ 3510 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 3511 goto out; 3512 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 3513 VDEV_TYPE_HOLE) != 0) 3514 goto out; 3515 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 3516 1) != 0) 3517 goto out; 3518 if (lastlog == 0) 3519 lastlog = vcount; 3520 varray[vcount++] = vdev; 3521 continue; 3522 } 3523 lastlog = 0; 3524 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 3525 == 0); 3526 3527 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) { 3528 vdev = child[c]; 3529 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 3530 goto out; 3531 continue; 3532 } else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 3533 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3534 "Source pool must be composed only of mirrors\n")); 3535 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3536 goto out; 3537 } 3538 3539 if (nvlist_lookup_string(child[c], 3540 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) { 3541 if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) 3542 is_special = B_TRUE; 3543 else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0) 3544 is_dedup = B_TRUE; 3545 } 3546 verify(nvlist_lookup_nvlist_array(child[c], 3547 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 3548 3549 /* find or add an entry for this top-level vdev */ 3550 if (newchildren > 0 && 3551 (entry = find_vdev_entry(zhp, mchild, mchildren, 3552 newchild, newchildren)) >= 0) { 3553 /* We found a disk that the user specified. */ 3554 vdev = mchild[entry]; 3555 ++found; 3556 } else { 3557 /* User didn't specify a disk for this vdev. */ 3558 vdev = mchild[mchildren - 1]; 3559 } 3560 3561 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 3562 goto out; 3563 3564 if (flags.dryrun != 0) { 3565 if (is_dedup == B_TRUE) { 3566 if (nvlist_add_string(varray[vcount - 1], 3567 ZPOOL_CONFIG_ALLOCATION_BIAS, 3568 VDEV_ALLOC_BIAS_DEDUP) != 0) 3569 goto out; 3570 } else if (is_special == B_TRUE) { 3571 if (nvlist_add_string(varray[vcount - 1], 3572 ZPOOL_CONFIG_ALLOCATION_BIAS, 3573 VDEV_ALLOC_BIAS_SPECIAL) != 0) 3574 goto out; 3575 } 3576 } 3577 } 3578 3579 /* did we find every disk the user specified? */ 3580 if (found != newchildren) { 3581 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 3582 "include at most one disk from each mirror")); 3583 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3584 goto out; 3585 } 3586 3587 /* Prepare the nvlist for populating. 
*/ 3588 if (*newroot == NULL) { 3589 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 3590 goto out; 3591 freelist = B_TRUE; 3592 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 3593 VDEV_TYPE_ROOT) != 0) 3594 goto out; 3595 } else { 3596 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 3597 } 3598 3599 /* Add all the children we found */ 3600 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 3601 lastlog == 0 ? vcount : lastlog) != 0) 3602 goto out; 3603 3604 /* 3605 * If we're just doing a dry run, exit now with success. 3606 */ 3607 if (flags.dryrun) { 3608 memory_err = B_FALSE; 3609 freelist = B_FALSE; 3610 goto out; 3611 } 3612 3613 /* now build up the config list & call the ioctl */ 3614 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3615 goto out; 3616 3617 if (nvlist_add_nvlist(newconfig, 3618 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3619 nvlist_add_string(newconfig, 3620 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3621 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3622 goto out; 3623 3624 /* 3625 * The new pool is automatically part of the namespace unless we 3626 * explicitly export it. 3627 */ 3628 if (!flags.import) 3629 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3630 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3631 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3632 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3633 goto out; 3634 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3635 goto out; 3636 3637 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3638 retval = zpool_standard_error(hdl, errno, msg); 3639 goto out; 3640 } 3641 3642 freelist = B_FALSE; 3643 memory_err = B_FALSE; 3644 3645 out: 3646 if (varray != NULL) { 3647 int v; 3648 3649 for (v = 0; v < vcount; v++) 3650 nvlist_free(varray[v]); 3651 free(varray); 3652 } 3653 zcmd_free_nvlists(&zc); 3654 nvlist_free(zc_props); 3655 nvlist_free(newconfig); 3656 if (freelist) { 3657 nvlist_free(*newroot); 3658 *newroot = NULL; 3659 } 3660 3661 if (retval != 0) 3662 return (retval); 3663 3664 if (memory_err) 3665 return (no_memory(hdl)); 3666 3667 return (0); 3668 } 3669 3670 /* 3671 * Remove the given device. 
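 * Log devices, hot spares, and cache devices are detached outright;
 * removing a top-level data vdev goes through the device-evacuation
 * machinery and may be undone with zpool_vdev_remove_cancel() below.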
3672 */ 3673 int 3674 zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3675 { 3676 zfs_cmd_t zc = {"\0"}; 3677 char msg[1024]; 3678 nvlist_t *tgt; 3679 boolean_t avail_spare, l2cache, islog; 3680 libzfs_handle_t *hdl = zhp->zpool_hdl; 3681 uint64_t version; 3682 3683 (void) snprintf(msg, sizeof (msg), 3684 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3685 3686 if (zpool_is_draid_spare(path)) { 3687 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3688 "dRAID spares cannot be removed")); 3689 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3690 } 3691 3692 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3693 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3694 &islog)) == NULL) 3695 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3696 3697 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3698 if (islog && version < SPA_VERSION_HOLES) { 3699 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3700 "pool must be upgraded to support log removal")); 3701 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3702 } 3703 3704 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 3705 3706 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3707 return (0); 3708 3709 switch (errno) { 3710 3711 case EINVAL: 3712 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3713 "invalid config; all top-level vdevs must " 3714 "have the same sector size and not be raidz.")); 3715 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 3716 break; 3717 3718 case EBUSY: 3719 if (islog) { 3720 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3721 "Mount encrypted datasets to replay logs.")); 3722 } else { 3723 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3724 "Pool busy; removal may already be in progress")); 3725 } 3726 (void) zfs_error(hdl, EZFS_BUSY, msg); 3727 break; 3728 3729 case EACCES: 3730 if (islog) { 3731 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3732 "Mount encrypted datasets to replay logs.")); 3733 (void) zfs_error(hdl, EZFS_BUSY, msg); 3734 } else { 3735 (void) zpool_standard_error(hdl, errno, msg); 3736 } 3737 break; 3738 3739 default: 3740 (void) zpool_standard_error(hdl, errno, msg); 3741 } 3742 return (-1); 3743 } 3744 3745 int 3746 zpool_vdev_remove_cancel(zpool_handle_t *zhp) 3747 { 3748 zfs_cmd_t zc; 3749 char msg[1024]; 3750 libzfs_handle_t *hdl = zhp->zpool_hdl; 3751 3752 (void) snprintf(msg, sizeof (msg), 3753 dgettext(TEXT_DOMAIN, "cannot cancel removal")); 3754 3755 bzero(&zc, sizeof (zc)); 3756 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3757 zc.zc_cookie = 1; 3758 3759 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3760 return (0); 3761 3762 return (zpool_standard_error(hdl, errno, msg)); 3763 } 3764 3765 int 3766 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path, 3767 uint64_t *sizep) 3768 { 3769 char msg[1024]; 3770 nvlist_t *tgt; 3771 boolean_t avail_spare, l2cache, islog; 3772 libzfs_handle_t *hdl = zhp->zpool_hdl; 3773 3774 (void) snprintf(msg, sizeof (msg), 3775 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"), 3776 path); 3777 3778 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3779 &islog)) == NULL) 3780 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3781 3782 if (avail_spare || l2cache || islog) { 3783 *sizep = 0; 3784 return (0); 3785 } 3786 3787 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) { 3788 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3789 "indirect size not available")); 3790 return (zfs_error(hdl, EINVAL, msg)); 3791 } 3792 return (0); 3793 } 3794 3795 /* 3796 * Clear the errors for 
the pool, or the particular device if specified. 3797 */ 3798 int 3799 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3800 { 3801 zfs_cmd_t zc = {"\0"}; 3802 char msg[1024]; 3803 nvlist_t *tgt; 3804 zpool_load_policy_t policy; 3805 boolean_t avail_spare, l2cache; 3806 libzfs_handle_t *hdl = zhp->zpool_hdl; 3807 nvlist_t *nvi = NULL; 3808 int error; 3809 3810 if (path) 3811 (void) snprintf(msg, sizeof (msg), 3812 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3813 path); 3814 else 3815 (void) snprintf(msg, sizeof (msg), 3816 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3817 zhp->zpool_name); 3818 3819 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3820 if (path) { 3821 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3822 &l2cache, NULL)) == NULL) 3823 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3824 3825 /* 3826 * Don't allow error clearing for hot spares. Do allow 3827 * error clearing for l2cache devices. 3828 */ 3829 if (avail_spare) 3830 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3831 3832 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3833 &zc.zc_guid) == 0); 3834 } 3835 3836 zpool_get_load_policy(rewindnvl, &policy); 3837 zc.zc_cookie = policy.zlp_rewind; 3838 3839 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3840 return (-1); 3841 3842 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) { 3843 zcmd_free_nvlists(&zc); return (-1); } 3844 3845 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3846 errno == ENOMEM) { 3847 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3848 zcmd_free_nvlists(&zc); 3849 return (-1); 3850 } 3851 } 3852 3853 if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) && 3854 errno != EPERM && errno != EACCES)) { 3855 if (policy.zlp_rewind & 3856 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3857 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3858 zpool_rewind_exclaim(hdl, zc.zc_name, 3859 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), 3860 nvi); 3861 nvlist_free(nvi); 3862 } 3863 zcmd_free_nvlists(&zc); 3864 return (0); 3865 } 3866 3867 zcmd_free_nvlists(&zc); 3868 return (zpool_standard_error(hdl, errno, msg)); 3869 } 3870 3871 /* 3872 * Similar to zpool_clear(), but takes a GUID (used by fmd). 3873 */ 3874 int 3875 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3876 { 3877 zfs_cmd_t zc = {"\0"}; 3878 char msg[1024]; 3879 libzfs_handle_t *hdl = zhp->zpool_hdl; 3880 3881 (void) snprintf(msg, sizeof (msg), 3882 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3883 (u_longlong_t)guid); 3884 3885 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3886 zc.zc_guid = guid; 3887 zc.zc_cookie = ZPOOL_NO_REWIND; 3888 3889 if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0) 3890 return (0); 3891 3892 return (zpool_standard_error(hdl, errno, msg)); 3893 } 3894 3895 /* 3896 * Change the GUID for a pool. 3897 */ 3898 int 3899 zpool_reguid(zpool_handle_t *zhp) 3900 { 3901 char msg[1024]; 3902 libzfs_handle_t *hdl = zhp->zpool_hdl; 3903 zfs_cmd_t zc = {"\0"}; 3904 3905 (void) snprintf(msg, sizeof (msg), 3906 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3907 3908 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3909 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3910 return (0); 3911 3912 return (zpool_standard_error(hdl, errno, msg)); 3913 } 3914 3915 /* 3916 * Reopen the pool.
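 * Suitable as a zpool_iter() callback; 'data' points to a boolean_t
 * that controls whether an in-progress scrub is restarted after the
 * reopen.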
3917 */ 3918 int 3919 zpool_reopen_one(zpool_handle_t *zhp, void *data) 3920 { 3921 libzfs_handle_t *hdl = zpool_get_handle(zhp); 3922 const char *pool_name = zpool_get_name(zhp); 3923 boolean_t *scrub_restart = data; 3924 int error; 3925 3926 error = lzc_reopen(pool_name, *scrub_restart); 3927 if (error) { 3928 return (zpool_standard_error_fmt(hdl, error, 3929 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name)); 3930 } 3931 3932 return (0); 3933 } 3934 3935 /* call into libzfs_core to execute the sync IOCTL per pool */ 3936 int 3937 zpool_sync_one(zpool_handle_t *zhp, void *data) 3938 { 3939 int ret; 3940 libzfs_handle_t *hdl = zpool_get_handle(zhp); 3941 const char *pool_name = zpool_get_name(zhp); 3942 boolean_t *force = data; 3943 nvlist_t *innvl = fnvlist_alloc(); 3944 3945 fnvlist_add_boolean_value(innvl, "force", *force); 3946 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) { 3947 nvlist_free(innvl); 3948 return (zpool_standard_error_fmt(hdl, ret, 3949 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name)); 3950 } 3951 nvlist_free(innvl); 3952 3953 return (0); 3954 } 3955 3956 #define PATH_BUF_LEN 64 3957 3958 /* 3959 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3960 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3961 * We also check if this is a whole disk, in which case we strip off the 3962 * trailing 's0' slice name. 3963 * 3964 * This routine is also responsible for identifying when disks have been 3965 * reconfigured in a new location. The kernel will have opened the device by 3966 * devid, but the path will still refer to the old location. To catch this, we 3967 * first do a path -> devid translation (which is fast for the common case). If 3968 * the devid matches, we're done. If not, we do a reverse devid -> path 3969 * translation and issue the appropriate ioctl() to update the path of the vdev. 3970 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3971 * of these checks. 3972 */ 3973 char * 3974 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3975 int name_flags) 3976 { 3977 char *path, *type, *env; 3978 uint64_t value; 3979 char buf[PATH_BUF_LEN]; 3980 char tmpbuf[PATH_BUF_LEN]; 3981 3982 /* 3983 * vdev_name will be "root"/"root-0" for the root vdev, but it is the 3984 * zpool name that will be displayed to the user. 
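 * Naming can also be influenced by the ZPOOL_VDEV_NAME_PATH,
 * ZPOOL_VDEV_NAME_GUID, and ZPOOL_VDEV_NAME_FOLLOW_LINKS environment
 * variables, which are folded into 'name_flags' just below.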
3985 */ 3986 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 3987 if (zhp != NULL && strcmp(type, "root") == 0) 3988 return (zfs_strdup(hdl, zpool_get_name(zhp))); 3989 3990 env = getenv("ZPOOL_VDEV_NAME_PATH"); 3991 if (env && (strtoul(env, NULL, 0) > 0 || 3992 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) 3993 name_flags |= VDEV_NAME_PATH; 3994 3995 env = getenv("ZPOOL_VDEV_NAME_GUID"); 3996 if (env && (strtoul(env, NULL, 0) > 0 || 3997 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) 3998 name_flags |= VDEV_NAME_GUID; 3999 4000 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS"); 4001 if (env && (strtoul(env, NULL, 0) > 0 || 4002 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) 4003 name_flags |= VDEV_NAME_FOLLOW_LINKS; 4004 4005 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 || 4006 name_flags & VDEV_NAME_GUID) { 4007 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value); 4008 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value); 4009 path = buf; 4010 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 4011 if (name_flags & VDEV_NAME_FOLLOW_LINKS) { 4012 char *rp = realpath(path, NULL); 4013 if (rp) { 4014 strlcpy(buf, rp, sizeof (buf)); 4015 path = buf; 4016 free(rp); 4017 } 4018 } 4019 4020 /* 4021 * For a block device only use the name. 4022 */ 4023 if ((strcmp(type, VDEV_TYPE_DISK) == 0) && 4024 !(name_flags & VDEV_NAME_PATH)) { 4025 path = zfs_strip_path(path); 4026 } 4027 4028 /* 4029 * Remove the partition from the path if this is a whole disk. 4030 */ 4031 if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 && 4032 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value) 4033 == 0 && value && !(name_flags & VDEV_NAME_PATH)) { 4034 return (zfs_strip_partition(path)); 4035 } 4036 } else { 4037 path = type; 4038 4039 /* 4040 * If it's a raidz device, we need to stick in the parity level. 4041 */ 4042 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 4043 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 4044 &value) == 0); 4045 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 4046 (u_longlong_t)value); 4047 path = buf; 4048 } 4049 4050 /* 4051 * If it's a dRAID device, we add parity, groups, and spares. 4052 */ 4053 if (strcmp(path, VDEV_TYPE_DRAID) == 0) { 4054 uint64_t ndata, nparity, nspares; 4055 nvlist_t **child; 4056 uint_t children; 4057 4058 verify(nvlist_lookup_nvlist_array(nv, 4059 ZPOOL_CONFIG_CHILDREN, &child, &children) == 0); 4060 verify(nvlist_lookup_uint64(nv, 4061 ZPOOL_CONFIG_NPARITY, &nparity) == 0); 4062 verify(nvlist_lookup_uint64(nv, 4063 ZPOOL_CONFIG_DRAID_NDATA, &ndata) == 0); 4064 verify(nvlist_lookup_uint64(nv, 4065 ZPOOL_CONFIG_DRAID_NSPARES, &nspares) == 0); 4066 4067 path = zpool_draid_name(buf, sizeof (buf), ndata, 4068 nparity, nspares, children); 4069 } 4070 4071 /* 4072 * We identify each top-level vdev by using a <type-id> 4073 * naming convention. 4074 */ 4075 if (name_flags & VDEV_NAME_TYPE_ID) { 4076 uint64_t id; 4077 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 4078 &id) == 0); 4079 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu", 4080 path, (u_longlong_t)id); 4081 path = tmpbuf; 4082 } 4083 } 4084 4085 return (zfs_strdup(hdl, path)); 4086 } 4087 4088 static int 4089 zbookmark_mem_compare(const void *a, const void *b) 4090 { 4091 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 4092 } 4093 4094 /* 4095 * Retrieve the persistent error log, uniquify the members, and return to the 4096 * caller. 

static int
zbookmark_mem_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t count;
	zbookmark_phys_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel. If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_phys_t));
	zc.zc_nvlist_dst_size = count;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	for (;;) {
		if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				void *dst;

				count = zc.zc_nvlist_dst_size;
				dst = zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_phys_t));
				zc.zc_nvlist_dst = (uintptr_t)dst;
			} else {
				return (zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "errors: List of "
				    "errors unavailable")));
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks. This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process. So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlists of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
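
/*
 * Example (illustrative sketch): the returned nvlist holds one nvlist per
 * <dataset, object> pair, all added under the same (arbitrary) "ejk" key,
 * so consumers walk it with nvlist_next_nvpair() rather than by name:
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv;
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			(void) printf("dataset %llu object %llu\n",
 *			    (u_longlong_t)fnvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_DATASET),
 *			    (u_longlong_t)fnvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_OBJECT));
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */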

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	int i;

	(void) strlcpy(string, basename(argv[0]), len);
	for (i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}

int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *args;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_string(args, "message", message);
	err = zcmd_write_src_nvlist(hdl, &zc, args);
	if (err == 0)
		err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
	nvlist_free(args);
	zcmd_free_nvlists(&zc);
	return (err);
}

/*
 * Perform an ioctl to retrieve part of the command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual number of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}
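
/*
 * Example (illustrative sketch): the zpool command records each invocation
 * in the pool history by combining the two helpers above. 'argc'/'argv'
 * are the process arguments and 'g_zfs' an open libzfs handle; the buffer
 * size follows the kernel's per-record limit:
 *
 *	char history_str[HIS_MAX_RECORD_LEN];
 *
 *	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
 *	(void) zpool_log_history(g_zfs, history_str);
 */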

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
    boolean_t *eof)
{
	char *buf;
	int buflen = 128 * 1024;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;
	uint64_t start = *off;

	buf = malloc(buflen);
	if (buf == NULL)
		return (ENOMEM);
	/* process about 1MB at a time */
	while (*off - start < 1024 * 1024) {
		uint64_t bytes_read = buflen;
		uint64_t leftover;

		if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read) {
			*eof = B_TRUE;
			break;
		}

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		*off -= leftover;
		if (leftover == bytes_read) {
			/*
			 * no progress made, because buffer is not big enough
			 * to hold this record; resize and retry.
			 */
			buflen *= 2;
			free(buf);
			buf = malloc(buflen);
			if (buf == NULL)
				return (ENOMEM);
		}
	}

	free(buf);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
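
/*
 * Example (illustrative sketch): because the history is returned in chunks,
 * callers loop until 'eof' is set, carrying 'off' between calls. Each
 * record is an nvlist whose keys include ZPOOL_HIST_TIME and ZPOOL_HIST_CMD:
 *
 *	nvlist_t *nvhis;
 *	uint64_t off = 0;
 *	boolean_t eof = B_FALSE;
 *
 *	while (!eof) {
 *		nvlist_t **records;
 *		uint_t numrecords, i;
 *
 *		if (zpool_get_history(zhp, &nvhis, &off, &eof) != 0)
 *			break;
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++) {
 *			if (nvlist_exists(records[i], ZPOOL_HIST_CMD))
 *				(void) printf("%s\n", fnvlist_lookup_string(
 *				    records[i], ZPOOL_HIST_CMD));
 *		}
 *		nvlist_free(nvhis);
 *	}
 */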

/*
 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
 * If there is a new event available 'nvp' will contain a newly allocated
 * nvlist and 'dropped' will be set to the number of missed events since
 * the last call to this function. When 'nvp' is set to NULL it indicates
 * no new events are available. In either case the function returns 0 and
 * it is up to the caller to free 'nvp'. In the case of a fatal error the
 * function will return a non-zero value. When the function is called in
 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
 * it will not return until a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, unsigned flags, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = zevent_fd;

	if (flags & ZEVENT_NONBLOCK)
		zc.zc_guid = ZEVENT_NONBLOCK;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
		return (-1);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (!(flags & ZEVENT_NONBLOCK))
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			goto out;
		case ENOMEM:
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				error = zfs_error_fmt(hdl, EZFS_NOMEM,
				    dgettext(TEXT_DOMAIN, "cannot get event"));
				goto out;
			} else {
				goto retry;
			}
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}

/*
 * Clear all events.
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot clear events"));

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno, msg));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}
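
/*
 * Example (illustrative sketch): a non-blocking consumer drains the event
 * queue until zpool_events_next() returns with 'nv' still NULL. The
 * 'zevent_fd' descriptor comes from opening ZFS_DEV, as the zpool events
 * command does; dump_nvlist() here is libnvpair's debug printer:
 *
 *	int zevent_fd = open(ZFS_DEV, O_RDWR);
 *	nvlist_t *nv;
 *	int dropped;
 *
 *	while (zpool_events_next(g_zfs, &nv, &dropped,
 *	    ZEVENT_NONBLOCK, zevent_fd) == 0 && nv != NULL) {
 *		if (dropped > 0)
 *			(void) printf("dropped %d events\n", dropped);
 *		dump_nvlist(nv, 8);
 *		nvlist_free(nv);
 *	}
 *	(void) close(zevent_fd);
 */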

/*
 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
 * the passed zevent_fd file descriptor. On success zero is returned,
 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
 */
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	zc.zc_guid = eid;
	zc.zc_cleanup_fd = zevent_fd;

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
		switch (errno) {
		case ENOENT:
			error = zfs_error_fmt(hdl, EZFS_NOENT,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		case ENOMEM:
			error = zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;
		}
	}

	return (error);
}

static void
zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len, boolean_t always_unmounted)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (zfs_ioctl(zhp->zpool_hdl,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
	    &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
}

void
zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
}

/*
 * Wait while the specified activity is in progress in the pool.
 */
int
zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
{
	boolean_t missing;

	int error = zpool_wait_status(zhp, activity, &missing, NULL);

	if (missing) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
		    dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
		    zhp->zpool_name);
		return (ENOENT);
	} else {
		return (error);
	}
}
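
/*
 * Example (illustrative sketch): zpool status resolves corrupted-file
 * bookmarks with zpool_obj_to_path(), and "zpool wait" style consumers
 * block on an activity with zpool_wait(). Here 'dsobj' and 'obj' stand in
 * for the dataset and object numbers taken from the error log above:
 *
 *	char pathname[MAXPATHLEN];
 *	zpool_obj_to_path(zhp, dsobj, obj, pathname, sizeof (pathname));
 *
 *	int err = zpool_wait(zhp, ZPOOL_WAIT_SCRUB);
 */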

/*
 * Wait for the given activity and return the status of the wait (whether or
 * not any waiting was done) in the 'waited' parameter. Non-existent pools
 * are reported via the 'missing' parameter, rather than by printing an error
 * message. This is convenient when this function is called in a loop over a
 * long period of time (as it is, for example, by the 'zpool wait' command).
 * In that scenario, a pool being exported or destroyed should be considered
 * a normal event, so we don't want to print an error when we find that the
 * pool doesn't exist.
 */
int
zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
    boolean_t *missing, boolean_t *waited)
{
	int error = lzc_wait(zhp->zpool_name, activity, waited);
	*missing = (error == ENOENT);
	if (*missing)
		return (0);

	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
		    zhp->zpool_name);
	}

	return (error);
}

int
zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
{
	int error = lzc_set_bootenv(zhp->zpool_name, envmap);
	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN,
		    "error setting bootenv in pool '%s'"), zhp->zpool_name);
	}

	return (error);
}

int
zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
{
	nvlist_t *nvl;
	int error;

	nvl = NULL;
	error = lzc_get_bootenv(zhp->zpool_name, &nvl);
	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN,
		    "error getting bootenv in pool '%s'"), zhp->zpool_name);
	} else {
		*nvlp = nvl;
	}

	return (error);
}
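
/*
 * Example (illustrative sketch): the bootenv is an opaque nvlist that
 * round-trips through the two functions above; key names and contents are
 * defined by the boot loader consuming them, so the "loader:example" key
 * below is purely hypothetical:
 *
 *	nvlist_t *env = fnvlist_alloc();
 *	fnvlist_add_string(env, "loader:example", "value");
 *	if (zpool_set_bootenv(zhp, env) == 0) {
 *		nvlist_t *out = NULL;
 *		if (zpool_get_bootenv(zhp, &out) == 0)
 *			nvlist_free(out);
 *	}
 *	nvlist_free(env);
 */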