1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or https://opensource.org/licenses/CDDL-1.0. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 25 * Copyright (c) 2011, 2024 by Delphix. All rights reserved. 26 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com> 27 * Copyright (c) 2018 Datto Inc. 28 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved. 29 * Copyright (c) 2017, Intel Corporation. 30 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com> 31 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org> 32 * Copyright (c) 2021, 2023, Klara Inc. 
 */

#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_sysfs.h>
#include <sys/vdev_disk.h>
#include <sys/types.h>
#include <dlfcn.h>
#include <libzutil.h>
#include <fcntl.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static boolean_t zpool_vdev_is_interior(const char *name);

/*
 * Context bits telling zpool_valid_proplist() which validation rules
 * apply (some properties are only settable at create or import time,
 * or only meaningful as vdev properties).
 */
typedef struct prop_flags {
	unsigned int create:1;	/* Validate property on creation */
	unsigned int import:1;	/* Validate property on import */
	unsigned int vdevprop:1;	/* Validate property as a VDEV property */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

/*
 * Fetch the pool's properties from the kernel via
 * ZFS_IOC_POOL_GET_PROPS and cache the resulting nvlist in
 * zhp->zpool_props.  If specific property names were registered on the
 * handle (zhp->zpool_propnames), they are passed down in the input
 * nvlist under ZPOOL_GET_PROPS_NAMES so the kernel can include values
 * that are only computed on request.
 *
 * Returns 0 on success, -1 on ioctl or nvlist-unpack failure.
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	/* Forward any explicitly requested property names to the kernel. */
	if (zhp->zpool_n_propnames > 0) {
		nvlist_t *innvl = fnvlist_alloc();
		fnvlist_add_string_array(innvl, ZPOOL_GET_PROPS_NAMES,
		    zhp->zpool_propnames, zhp->zpool_n_propnames);
		zcmd_write_src_nvlist(hdl, &zc, innvl);
	}

	zcmd_alloc_dst_nvlist(hdl, &zc, 0);

	/* Retry with a larger destination buffer until the reply fits. */
	while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM)
			zcmd_expand_dst_nvlist(hdl, &zc);
		else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

/*
 * Refresh the cached property nvlist.  The old nvlist is only freed
 * after the new one has been fetched successfully, so on failure the
 * handle keeps its previous (stale but valid) properties.
 *
 * Returns 0 on success, -1 if the refresh failed.
 */
int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

/*
 * Look up a string-valued pool property in the cached props nvlist.
 * If absent, fall back to the property's default string (or "-" when
 * there is no default) with a source of ZPROP_SRC_DEFAULT.  The
 * property source is stored through 'src' when non-NULL.
 */
static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	const char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
		value = fnvlist_lookup_string(nv, ZPROP_VALUE);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Look up a numeric pool property, loading the property cache on first
 * use.  If the cache cannot be loaded, the top-level vdev guid can
 * still be recovered from the handle's config nvlist; anything else
 * falls back to the property's default numeric value.  The property
 * source is stored through 'src' when non-NULL.
 */
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
		value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		/* The aux code distinguishes why the vdev can't be opened. */
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
 * "SUSPENDED", etc).
 */
const char *
zpool_get_state_str(zpool_handle_t *zhp)
{
	zpool_errata_t errata;
	zpool_status_t status;
	const char *str;

	status = zpool_get_status(zhp, NULL, &errata);

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		str = gettext("FAULTED");
	} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
	    status == ZPOOL_STATUS_IO_FAILURE_CONTINUE ||
	    status == ZPOOL_STATUS_IO_FAILURE_MMP) {
		str = gettext("SUSPENDED");
	} else {
		/* Otherwise report the root vdev's state. */
		nvlist_t *nvroot = fnvlist_lookup_nvlist(
		    zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE);
		uint_t vsc;
		vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(
		    nvroot, ZPOOL_CONFIG_VDEV_STATS, &vsc);
		str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
	}
	return (str);
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 *
 * 'buf'/'len' receive the formatted value; 'srctype' (optional)
 * receives where the value came from (local/default/...).  When
 * 'literal' is set, numeric values are printed raw rather than
 * humanized.  Returns 0 on success, -1 on failure.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;

	/*
	 * For an unavailable pool only a handful of properties can be
	 * answered (from the handle itself or the config nvlist);
	 * everything else reports "-".
	 */
	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
		case ZPOOL_PROP_COMPATIBILITY:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			zfs_fallthrough;
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	/*
	 * ZPOOL_PROP_DEDUPCACHED can be fetched by name only using
	 * the ZPOOL_GET_PROPS_NAMES mechanism
	 */
	if (prop == ZPOOL_PROP_DEDUPCACHED) {
		zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
		(void) zpool_get_all_props(zhp);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_DEDUP_TABLE_QUOTA:
			/*
			 * If dedup quota is 0, we translate this into 'none'
			 * (unless literal is set). And if it is UINT64_MAX
			 * we translate that as 'automatic' (limit to size of
			 * the dedicated dedup VDEV). Otherwise, fall through
			 * into the regular number formatting.
			 */
			if (intval == 0) {
				(void) strlcpy(buf, literal ? "0" : "none",
				    len);
				break;
			} else if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "auto", len);
				break;
			}
			zfs_fallthrough;

		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
		case ZPOOL_PROP_MAXBLOCKSIZE:
		case ZPOOL_PROP_MAXDNODESIZE:
		case ZPOOL_PROP_BCLONESAVED:
		case ZPOOL_PROP_BCLONEUSED:
		case ZPOOL_PROP_DEDUP_TABLE_SIZE:
		case ZPOOL_PROP_DEDUPCACHED:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_CHECKPOINT:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			/* UINT64_MAX means fragmentation is not available. */
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_BCLONERATIO:
		case ZPOOL_PROP_DEDUPRATIO:
			/* Ratios are stored scaled by 100 (e.g. 150 = 1.50x). */
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			zfs_fallthrough;
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

/*
 * Get a zpool property value for 'propname' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_userprop(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len, zprop_source_t *srctype)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	const char *value;
	zprop_source_t source = ZPROP_SRC_LOCAL;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, propname, &nv) == 0) {
		if (nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0)
			source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = "-";
	}

	if (srctype)
		*srctype = source;

	(void) strlcpy(buf, value, len);

	return (0);
}

/*
 * Check if the bootfs name has the same pool name as it is set to.
 * Assuming bootfs is a valid dataset name.  An empty bootfs is
 * considered valid (it clears the property).
 */
static boolean_t
bootfs_name_valid(const char *pool, const char *bootfs)
{
	int len = strlen(pool);
	if (bootfs[0] == '\0')
		return (B_TRUE);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	/* bootfs must be the pool itself or a dataset inside it. */
	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
522 */ 523 static nvlist_t * 524 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 525 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 526 { 527 nvpair_t *elem; 528 nvlist_t *retprops; 529 zpool_prop_t prop; 530 const char *strval; 531 uint64_t intval; 532 const char *check; 533 struct stat64 statbuf; 534 zpool_handle_t *zhp; 535 char *parent, *slash; 536 char report[1024]; 537 538 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 539 (void) no_memory(hdl); 540 return (NULL); 541 } 542 543 elem = NULL; 544 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 545 const char *propname = nvpair_name(elem); 546 547 if (flags.vdevprop && zpool_prop_vdev(propname)) { 548 vdev_prop_t vprop = vdev_name_to_prop(propname); 549 550 if (vdev_prop_readonly(vprop)) { 551 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 552 "is readonly"), propname); 553 (void) zfs_error(hdl, EZFS_PROPREADONLY, 554 errbuf); 555 goto error; 556 } 557 558 if (zprop_parse_value(hdl, elem, vprop, ZFS_TYPE_VDEV, 559 retprops, &strval, &intval, errbuf) != 0) 560 goto error; 561 562 continue; 563 } else if (flags.vdevprop && vdev_prop_user(propname)) { 564 if (nvlist_add_nvpair(retprops, elem) != 0) { 565 (void) no_memory(hdl); 566 goto error; 567 } 568 continue; 569 } else if (flags.vdevprop) { 570 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 571 "invalid property: '%s'"), propname); 572 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 573 goto error; 574 } 575 576 prop = zpool_name_to_prop(propname); 577 if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) { 578 int err; 579 char *fname = strchr(propname, '@') + 1; 580 581 err = zfeature_lookup_name(fname, NULL); 582 if (err != 0) { 583 ASSERT3U(err, ==, ENOENT); 584 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 585 "feature '%s' unsupported by kernel"), 586 fname); 587 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 588 goto error; 589 } 590 591 if (nvpair_type(elem) != DATA_TYPE_STRING) { 592 
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 593 "'%s' must be a string"), propname); 594 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 595 goto error; 596 } 597 598 (void) nvpair_value_string(elem, &strval); 599 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 && 600 strcmp(strval, ZFS_FEATURE_DISABLED) != 0) { 601 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 602 "property '%s' can only be set to " 603 "'enabled' or 'disabled'"), propname); 604 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 605 goto error; 606 } 607 608 if (!flags.create && 609 strcmp(strval, ZFS_FEATURE_DISABLED) == 0) { 610 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 611 "property '%s' can only be set to " 612 "'disabled' at creation time"), propname); 613 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 614 goto error; 615 } 616 617 if (nvlist_add_uint64(retprops, propname, 0) != 0) { 618 (void) no_memory(hdl); 619 goto error; 620 } 621 continue; 622 } else if (prop == ZPOOL_PROP_INVAL && 623 zfs_prop_user(propname)) { 624 /* 625 * This is a user property: make sure it's a 626 * string, and that it's less than ZAP_MAXNAMELEN. 
627 */ 628 if (nvpair_type(elem) != DATA_TYPE_STRING) { 629 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 630 "'%s' must be a string"), propname); 631 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 632 goto error; 633 } 634 635 if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) { 636 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 637 "property name '%s' is too long"), 638 propname); 639 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 640 goto error; 641 } 642 643 (void) nvpair_value_string(elem, &strval); 644 645 if (strlen(strval) >= ZFS_MAXPROPLEN) { 646 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 647 "property value '%s' is too long"), 648 strval); 649 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 650 goto error; 651 } 652 653 if (nvlist_add_string(retprops, propname, 654 strval) != 0) { 655 (void) no_memory(hdl); 656 goto error; 657 } 658 659 continue; 660 } 661 662 /* 663 * Make sure this property is valid and applies to this type. 664 */ 665 if (prop == ZPOOL_PROP_INVAL) { 666 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 667 "invalid property '%s'"), propname); 668 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 669 goto error; 670 } 671 672 if (zpool_prop_readonly(prop)) { 673 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 674 "is readonly"), propname); 675 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 676 goto error; 677 } 678 679 if (!flags.create && zpool_prop_setonce(prop)) { 680 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 681 "property '%s' can only be set at " 682 "creation time"), propname); 683 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 684 goto error; 685 } 686 687 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 688 &strval, &intval, errbuf) != 0) 689 goto error; 690 691 /* 692 * Perform additional checking for specific properties. 
693 */ 694 switch (prop) { 695 case ZPOOL_PROP_VERSION: 696 if (intval < version || 697 !SPA_VERSION_IS_SUPPORTED(intval)) { 698 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 699 "property '%s' number %llu is invalid."), 700 propname, (unsigned long long)intval); 701 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 702 goto error; 703 } 704 break; 705 706 case ZPOOL_PROP_ASHIFT: 707 if (intval != 0 && 708 (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) { 709 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 710 "property '%s' number %llu is invalid, " 711 "only values between %" PRId32 " and %" 712 PRId32 " are allowed."), 713 propname, (unsigned long long)intval, 714 ASHIFT_MIN, ASHIFT_MAX); 715 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 716 goto error; 717 } 718 break; 719 720 case ZPOOL_PROP_BOOTFS: 721 if (flags.create || flags.import) { 722 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 723 "property '%s' cannot be set at creation " 724 "or import time"), propname); 725 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 726 goto error; 727 } 728 729 if (version < SPA_VERSION_BOOTFS) { 730 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 731 "pool must be upgraded to support " 732 "'%s' property"), propname); 733 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 734 goto error; 735 } 736 737 /* 738 * bootfs property value has to be a dataset name and 739 * the dataset has to be in the same pool as it sets to. 
740 */ 741 if (!bootfs_name_valid(poolname, strval)) { 742 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 743 "is an invalid name"), strval); 744 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 745 goto error; 746 } 747 748 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 749 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 750 "could not open pool '%s'"), poolname); 751 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 752 goto error; 753 } 754 zpool_close(zhp); 755 break; 756 757 case ZPOOL_PROP_ALTROOT: 758 if (!flags.create && !flags.import) { 759 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 760 "property '%s' can only be set during pool " 761 "creation or import"), propname); 762 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 763 goto error; 764 } 765 766 if (strval[0] != '/') { 767 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 768 "bad alternate root '%s'"), strval); 769 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 770 goto error; 771 } 772 break; 773 774 case ZPOOL_PROP_CACHEFILE: 775 if (strval[0] == '\0') 776 break; 777 778 if (strcmp(strval, "none") == 0) 779 break; 780 781 if (strval[0] != '/') { 782 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 783 "property '%s' must be empty, an " 784 "absolute path, or 'none'"), propname); 785 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 786 goto error; 787 } 788 789 parent = strdup(strval); 790 if (parent == NULL) { 791 (void) zfs_error(hdl, EZFS_NOMEM, errbuf); 792 goto error; 793 } 794 slash = strrchr(parent, '/'); 795 796 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 797 strcmp(slash, "/..") == 0) { 798 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 799 "'%s' is not a valid file"), parent); 800 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 801 free(parent); 802 goto error; 803 } 804 805 *slash = '\0'; 806 807 if (parent[0] != '\0' && 808 (stat64(parent, &statbuf) != 0 || 809 !S_ISDIR(statbuf.st_mode))) { 810 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 811 "'%s' is not a valid directory"), 812 parent); 813 (void) zfs_error(hdl, 
EZFS_BADPATH, errbuf); 814 free(parent); 815 goto error; 816 } 817 free(parent); 818 819 break; 820 821 case ZPOOL_PROP_COMPATIBILITY: 822 switch (zpool_load_compat(strval, NULL, report, 1024)) { 823 case ZPOOL_COMPATIBILITY_OK: 824 case ZPOOL_COMPATIBILITY_WARNTOKEN: 825 break; 826 case ZPOOL_COMPATIBILITY_BADFILE: 827 case ZPOOL_COMPATIBILITY_BADTOKEN: 828 case ZPOOL_COMPATIBILITY_NOFILES: 829 zfs_error_aux(hdl, "%s", report); 830 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 831 goto error; 832 } 833 break; 834 835 case ZPOOL_PROP_COMMENT: 836 for (check = strval; *check != '\0'; check++) { 837 if (!isprint(*check)) { 838 zfs_error_aux(hdl, 839 dgettext(TEXT_DOMAIN, 840 "comment may only have printable " 841 "characters")); 842 (void) zfs_error(hdl, EZFS_BADPROP, 843 errbuf); 844 goto error; 845 } 846 } 847 if (strlen(strval) > ZPROP_MAX_COMMENT) { 848 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 849 "comment must not exceed %d characters"), 850 ZPROP_MAX_COMMENT); 851 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 852 goto error; 853 } 854 break; 855 case ZPOOL_PROP_READONLY: 856 if (!flags.import) { 857 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 858 "property '%s' can only be set at " 859 "import time"), propname); 860 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 861 goto error; 862 } 863 break; 864 case ZPOOL_PROP_MULTIHOST: 865 if (get_system_hostid() == 0) { 866 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 867 "requires a non-zero system hostid")); 868 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 869 goto error; 870 } 871 break; 872 case ZPOOL_PROP_DEDUPDITTO: 873 printf("Note: property '%s' no longer has " 874 "any effect\n", propname); 875 break; 876 877 default: 878 break; 879 } 880 } 881 882 return (retprops); 883 error: 884 nvlist_free(retprops); 885 return (NULL); 886 } 887 888 /* 889 * Set zpool property : propname=propval. 
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[ERRBUFLEN];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	/* Validate and parse the single name=value pair before the ioctl. */
	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl);

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	/* On success, re-read the props so the cached nvlist is current. */
	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}

/*
 * Expand the zprop_list in '*plp' for this pool: fill in display widths
 * for fixed properties and, in the "all properties" case, append user
 * properties, feature@ properties, and unsupported@ features found on
 * the pool.  Returns 0 on success, -1 on failure.
 */
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
    zfs_type_t type, boolean_t literal)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, type) != 0)
		return (-1);

	if (type == ZFS_TYPE_VDEV)
		return (0);

	/* Find the tail of the list so new entries keep their order. */
	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		/* Handle userprops in the all properties case */
		if (zhp->zpool_props == NULL && zpool_props_refresh(zhp))
			return (-1);

		nvp = NULL;
		while ((nvp = nvlist_next_nvpair(zhp->zpool_props, nvp)) !=
		    NULL) {
			const char *propname = nvpair_name(nvp);

			if (!zfs_prop_user(propname))
				continue;

			entry = zfs_alloc(hdl, sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_USERPROP;
			entry->pl_user_prop = zfs_strdup(hdl, propname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}

		/* Append a feature@<name> entry for every known feature. */
		for (i = 0; i < SPA_FEATURES; i++) {
			entry = zfs_alloc(hdl, sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_USERPROP;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_USERPROP;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	/* Widen each column to fit the formatted property values. */
	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed && !literal)
			continue;

		if (entry->pl_prop != ZPROP_USERPROP &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, literal) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		} else if (entry->pl_prop == ZPROP_INVAL &&
		    zfs_prop_user(entry->pl_user_prop) &&
		    zpool_get_userprop(zhp, entry->pl_user_prop, buf,
		    sizeof (buf), NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Expand the zprop_list in '*plp' for the named vdev: fill in display
 * widths and, in the "all properties" case, append the vdev's user
 * properties.  Returns 0 on success or the error from
 * zpool_get_all_vdev_props().
 */
int
vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,
    zprop_list_t **plp)
{
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	const char *strval = NULL;
	int err = 0;
	nvpair_t *elem = NULL;
	nvlist_t *vprops = NULL;
	nvlist_t *propval = NULL;
	const char *propname;
	vdev_prop_t prop;
	zprop_list_t **last;

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed)
			continue;

		if (zpool_get_vdev_prop(zhp, vdevname, entry->pl_prop,
		    entry->pl_user_prop, buf, sizeof (buf), NULL,
		    B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
		/* The NAME column must also fit the vdev name itself. */
		if (entry->pl_prop == VDEV_PROP_NAME &&
		    strlen(vdevname) > entry->pl_width)
			entry->pl_width = strlen(vdevname);
	}

	/* Handle the all properties case */
	last = plp;
	if (*last != NULL && (*last)->pl_all == B_TRUE) {
		while (*last != NULL)
			last = &(*last)->pl_next;

		err = zpool_get_all_vdev_props(zhp, vdevname, &vprops);
		if (err != 0)
			return (err);

		while ((elem = nvlist_next_nvpair(vprops, elem)) != NULL) {
			propname = nvpair_name(elem);

			/* Skip properties that are not user defined */
			if ((prop = vdev_name_to_prop(propname)) !=
			    VDEV_PROP_USERPROP)
				continue;

			if (nvpair_value_nvlist(elem, &propval) != 0)
				continue;

			strval = fnvlist_lookup_string(propval, ZPROP_VALUE);

			entry = zfs_alloc(zhp->zpool_hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = prop;
			entry->pl_user_prop = zfs_strdup(zhp->zpool_hdl,
			    propname);
			entry->pl_width = strlen(strval);
			entry->pl_all = B_TRUE;
			*last = entry;
			last = &entry->pl_next;
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	/* Skip past the "feature@"/"unsupported@" prefix. */
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
1156 */ 1157 if (supported) { 1158 int ret; 1159 spa_feature_t fid; 1160 1161 ret = zfeature_lookup_name(feature, &fid); 1162 if (ret != 0) { 1163 (void) strlcpy(buf, "-", len); 1164 return (ENOTSUP); 1165 } 1166 feature = spa_feature_table[fid].fi_guid; 1167 } 1168 1169 if (nvlist_lookup_uint64(features, feature, &refcount) == 0) 1170 found = B_TRUE; 1171 1172 if (supported) { 1173 if (!found) { 1174 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len); 1175 } else { 1176 if (refcount == 0) 1177 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len); 1178 else 1179 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len); 1180 } 1181 } else { 1182 if (found) { 1183 if (refcount == 0) { 1184 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE); 1185 } else { 1186 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY); 1187 } 1188 } else { 1189 (void) strlcpy(buf, "-", len); 1190 return (ENOTSUP); 1191 } 1192 } 1193 1194 return (0); 1195 } 1196 1197 /* 1198 * Validate the given pool name, optionally putting an extended error message in 1199 * 'buf'. 1200 */ 1201 boolean_t 1202 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 1203 { 1204 namecheck_err_t why; 1205 char what; 1206 int ret; 1207 1208 ret = pool_namecheck(pool, &why, &what); 1209 1210 /* 1211 * The rules for reserved pool names were extended at a later point. 1212 * But we need to support users with existing pools that may now be 1213 * invalid. So we only check for this expanded set of names during a 1214 * create (or import), and only in userland. 
1215 */ 1216 if (ret == 0 && !isopen && 1217 (strncmp(pool, "mirror", 6) == 0 || 1218 strncmp(pool, "raidz", 5) == 0 || 1219 strncmp(pool, "draid", 5) == 0 || 1220 strncmp(pool, "spare", 5) == 0 || 1221 strcmp(pool, "log") == 0)) { 1222 if (hdl != NULL) 1223 zfs_error_aux(hdl, 1224 dgettext(TEXT_DOMAIN, "name is reserved")); 1225 return (B_FALSE); 1226 } 1227 1228 1229 if (ret != 0) { 1230 if (hdl != NULL) { 1231 switch (why) { 1232 case NAME_ERR_TOOLONG: 1233 zfs_error_aux(hdl, 1234 dgettext(TEXT_DOMAIN, "name is too long")); 1235 break; 1236 1237 case NAME_ERR_INVALCHAR: 1238 zfs_error_aux(hdl, 1239 dgettext(TEXT_DOMAIN, "invalid character " 1240 "'%c' in pool name"), what); 1241 break; 1242 1243 case NAME_ERR_NOLETTER: 1244 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1245 "name must begin with a letter")); 1246 break; 1247 1248 case NAME_ERR_RESERVED: 1249 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1250 "name is reserved")); 1251 break; 1252 1253 case NAME_ERR_DISKLIKE: 1254 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1255 "pool name is reserved")); 1256 break; 1257 1258 case NAME_ERR_LEADING_SLASH: 1259 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1260 "leading slash in name")); 1261 break; 1262 1263 case NAME_ERR_EMPTY_COMPONENT: 1264 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1265 "empty component in name")); 1266 break; 1267 1268 case NAME_ERR_TRAILING_SLASH: 1269 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1270 "trailing slash in name")); 1271 break; 1272 1273 case NAME_ERR_MULTIPLE_DELIMITERS: 1274 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1275 "multiple '@' and/or '#' delimiters in " 1276 "name")); 1277 break; 1278 1279 case NAME_ERR_NO_AT: 1280 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1281 "permission set is missing '@'")); 1282 break; 1283 1284 default: 1285 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1286 "(%d) not defined"), why); 1287 break; 1288 } 1289 } 1290 return (B_FALSE); 1291 } 1292 1293 return (B_TRUE); 1294 } 1295 1296 /* 1297 * Open a handle to the given pool, 
even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	/* 'missing' set means no pool by this name is known to the kernel */
	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).  On success returns 0 with
 * *ret set to the handle, or to NULL if the pool is missing.
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	/* Refuse pools the kernel reports as unavailable (faulted) */
	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	/* nvlist_free() is NULL-safe, so unset configs are fine here */
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Check if vdev list contains a special vdev
 */
static boolean_t
zpool_has_special_vdev(nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (uint_t c = 0; c < children; c++) {
			const char *bias;

			/* A "special" allocation bias marks a special vdev */
			if (nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
			    strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
				return (B_TRUE);
			}
		}
	}
	return (B_FALSE);
}

/*
 * Check if vdev list contains a dRAID vdev
 */
static boolean_t
zpool_has_draid_vdev(nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (uint_t c = 0; c < children; c++) {
			const char *type;

			if (nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_TYPE, &type) == 0 &&
			    strcmp(type, VDEV_TYPE_DRAID) == 0) {
				return (B_TRUE);
			}
		}
	}
	return (B_FALSE);
}

/*
 * Output a dRAID top-level vdev name in to the provided buffer.
 * Format is "draid<parity>:<data>d:<children>c:<spares>s"; returns 'name'.
 */
static char *
zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
    uint64_t spares, uint64_t children)
{
	snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
	    VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
	    (u_longlong_t)children, (u_longlong_t)spares);

	return (name);
}

/*
 * Return B_TRUE if the provided name is a dRAID spare name
 * ("draid<parity>-<vdev>-<spare>").
 */
boolean_t
zpool_is_draid_spare(const char *name)
{
	uint64_t spare_id, parity, vdev_id;

	/*
	 * NOTE(review): the (u_longlong_t *) casts assume u_longlong_t and
	 * uint64_t have identical representation — true on supported
	 * platforms, but worth confirming if porting.
	 */
	if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
	    (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
	    (u_longlong_t *)&spare_id) == 3) {
		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	nvlist_t *hidden_args = NULL;
	uint8_t *wkeydata = NULL;
	uint_t wkeylen = 0;
	char errbuf[ERRBUFLEN];
	int ret = -1;	/* default return for the create_failed path */

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));

	zcmd_write_conf_nvlist(hdl, &zc, nvroot);

	/* Validate pool properties, if any were supplied */
	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, errbuf)) == NULL) {
			goto create_failed;
		}
	}

	/* Validate root-filesystem properties and crypto setup, if any */
	if (fsprops) {
		uint64_t zoned;
		const char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, B_TRUE, errbuf)) == NULL) {
			goto create_failed;
		}

		/* special_small_blocks only makes sense with a special vdev */
		if (nvlist_exists(zc_fsprops,
		    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
		    !zpool_has_special_vdev(nvroot)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "%s property requires a special vdev"),
			    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto create_failed;
		}

		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
		    &wkeydata, &wkeylen) != 0) {
			zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
		/* Wrapping key material is passed via a hidden args nvlist */
		if (wkeydata != NULL) {
			if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
				goto create_failed;

			if (nvlist_add_uint8_array(hidden_args, "wkeydata",
			    wkeydata, wkeylen) != 0)
				goto create_failed;

			if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
			    hidden_args) != 0)
				goto create_failed;
		}
	}

	if (zc_props)
		zcmd_write_src_nvlist(hdl, &zc, zc_props);

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		/* Free everything up front; each case below returns directly */
		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);
		nvlist_free(hidden_args);
		if (wkeydata != NULL)
			free(wkeydata);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.  This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, errbuf));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, errbuf));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, errbuf));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, errbuf));

		case EINVAL:
			if (zpool_has_draid_vdev(nvroot) &&
			    zfeature_lookup_name("draid", NULL) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID vdevs are unsupported by the "
				    "kernel"));
				return (zfs_error(hdl, EZFS_BADDEV, errbuf));
			} else {
				return (zpool_standard_error(hdl, errno,
				    errbuf));
			}

		default:
			return (zpool_standard_error(hdl, errno, errbuf));
		}
	}

create_failed:
	/* Shared cleanup; also reached on success with ret == 0 */
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	nvlist_free(hidden_args);
	if (wkeydata != NULL)
		free(wkeydata);
	return (ret);
}

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
1682 */ 1683 int 1684 zpool_destroy(zpool_handle_t *zhp, const char *log_str) 1685 { 1686 zfs_cmd_t zc = {"\0"}; 1687 zfs_handle_t *zfp = NULL; 1688 libzfs_handle_t *hdl = zhp->zpool_hdl; 1689 char errbuf[ERRBUFLEN]; 1690 1691 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1692 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1693 return (-1); 1694 1695 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1696 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1697 1698 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1699 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1700 "cannot destroy '%s'"), zhp->zpool_name); 1701 1702 if (errno == EROFS) { 1703 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1704 "one or more devices is read only")); 1705 (void) zfs_error(hdl, EZFS_BADDEV, errbuf); 1706 } else { 1707 (void) zpool_standard_error(hdl, errno, errbuf); 1708 } 1709 1710 if (zfp) 1711 zfs_close(zfp); 1712 return (-1); 1713 } 1714 1715 if (zfp) { 1716 remove_mountpoint(zfp); 1717 zfs_close(zfp); 1718 } 1719 1720 return (0); 1721 } 1722 1723 /* 1724 * Create a checkpoint in the given pool. 1725 */ 1726 int 1727 zpool_checkpoint(zpool_handle_t *zhp) 1728 { 1729 libzfs_handle_t *hdl = zhp->zpool_hdl; 1730 char errbuf[ERRBUFLEN]; 1731 int error; 1732 1733 error = lzc_pool_checkpoint(zhp->zpool_name); 1734 if (error != 0) { 1735 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1736 "cannot checkpoint '%s'"), zhp->zpool_name); 1737 (void) zpool_standard_error(hdl, error, errbuf); 1738 return (-1); 1739 } 1740 1741 return (0); 1742 } 1743 1744 /* 1745 * Discard the checkpoint from the given pool. 
1746 */ 1747 int 1748 zpool_discard_checkpoint(zpool_handle_t *zhp) 1749 { 1750 libzfs_handle_t *hdl = zhp->zpool_hdl; 1751 char errbuf[ERRBUFLEN]; 1752 int error; 1753 1754 error = lzc_pool_checkpoint_discard(zhp->zpool_name); 1755 if (error != 0) { 1756 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1757 "cannot discard checkpoint in '%s'"), zhp->zpool_name); 1758 (void) zpool_standard_error(hdl, error, errbuf); 1759 return (-1); 1760 } 1761 1762 return (0); 1763 } 1764 1765 /* 1766 * Load data type for the given pool. 1767 */ 1768 int 1769 zpool_prefetch(zpool_handle_t *zhp, zpool_prefetch_type_t type) 1770 { 1771 libzfs_handle_t *hdl = zhp->zpool_hdl; 1772 char msg[1024]; 1773 int error; 1774 1775 error = lzc_pool_prefetch(zhp->zpool_name, type); 1776 if (error != 0) { 1777 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1778 "cannot prefetch %s in '%s'"), 1779 type == ZPOOL_PREFETCH_DDT ? "ddt" : "", zhp->zpool_name); 1780 (void) zpool_standard_error(hdl, error, msg); 1781 return (-1); 1782 } 1783 1784 return (0); 1785 } 1786 1787 /* 1788 * Add the given vdevs to the pool. The caller must have already performed the 1789 * necessary verification to ensure that the vdev specification is well-formed. 
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot, boolean_t check_ashift)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char errbuf[ERRBUFLEN];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	/* Hot spares require at least SPA_VERSION_SPARES */
	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
	}

	/* L2ARC cache devices require at least SPA_VERSION_L2CACHE */
	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
	}

	zcmd_write_conf_nvlist(hdl, &zc, nvroot);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_flags = check_ashift;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case EINVAL:

			if (zpool_has_draid_vdev(nvroot) &&
			    zfeature_lookup_name("draid", NULL) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID vdevs are unsupported by the "
				    "kernel"));
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid config; a pool with removing/"
				    "removed vdevs does not support adding "
				    "raidz or dRAID vdevs"));
			}

			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, errbuf);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	/* zc_cookie carries 'force'; zc_guid is reused to carry 'hardforce' */
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error_fmt(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
			    zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
			    zhp->zpool_name));
		}
	}

	return (0);
}

/* Export the pool; 'force' overrides shared-spare checks */
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

/* Hard-force export: both force and hardforce set */
int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

/*
 * Print a human-readable description of what a rewind import did (or would
 * do, if 'dryrun'): the txg timestamp rewound to and the approximate amount
 * of transaction time discarded.  Silent if printing is disabled or the
 * rewind info is absent from 'config'.
 */
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	/*
	 * NOTE(review): the (time_t *)&rewindto casts assume time_t is
	 * 64-bit; on a 32-bit time_t platform this would misread the value —
	 * confirm for any new ports.
	 */
	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    ctime_r((time_t *)&rewindto, timestr) != NULL) {
		timestr[24] = 0;	/* strip ctime's trailing "\n" */
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

/*
 * Compose (into 'buf', of 'size' bytes) an explanation of how the pool
 * could be recovered via rewind, including the estimated data loss.
 * 'reason' >= 0 selects the "zpool clear" suggestion, otherwise "zpool
 * import".  Falls back to a destroy-and-restore message when no rewind
 * info is available.
 */
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config, char *buf, size_t size)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128], temp[1024];

	if (!hdl->libzfs_printerr)
		return;

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) snprintf(buf, size, dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    ctime_r((time_t *)&rewindto, timestr) != NULL) {
		timestr[24] = 0;	/* strip ctime's trailing "\n" */
		(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem.  "), timestr);
		(void) strlcat(buf, temp, size);
	} else {
		(void) strlcat(buf, dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"), size);
	}

	if (loss > 120) {
		(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly.  "),
		    ((longlong_t)loss + 30) / 60);
		(void) strlcat(buf, temp, size);
	} else if (loss > 0) {
		(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly.  "),
		    (longlong_t)loss);
		(void) strlcat(buf, temp, size);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) strlcat(buf, dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain.  "),
			    size);
		} else {
			(void) strlcat(buf, dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain.  "),
			    size);
		}
	}
	(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
	    reason >= 0 ? "clear" : "import", name);
	(void) strlcat(buf, temp, size);

	(void) strlcat(buf, dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"), size);
	return;

no_info:
	(void) strlcat(buf, dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"), size);
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new properties value to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	/* An altroot implies altroot + cachefile=none properties */
	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}

/*
 * Recursively print an indented vdev tree rooted at 'nv'; log vdevs are
 * tagged with " [log]".
 */
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

/*
 * Append one line per unsupported feature from 'config' to 'buf' (of
 * 'size' bytes), including the feature description when one is present.
 */
void
zpool_collect_unsup_feat(nvlist_t *config, char *buf, size_t size)
{
	nvlist_t *nvinfo, *unsup_feat;
	char temp[512];

	nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
	unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);

	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		const char *desc = fnvpair_value_string(nvp);
		if (strlen(desc) > 0) {
			(void) snprintf(temp, 512, "\t%s (%s)\n",
			    nvpair_name(nvp), desc);
			(void) strlcat(buf, temp, size);
		} else {
			(void) snprintf(temp, 512, "\t%s\n", nvpair_name(nvp));
			(void) strlcat(buf, temp, size);
		}
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameters control whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	zpool_load_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	const char *thename;
	const char *origname;
	int ret;
	int error = 0;
	char buf[2048];
	char errbuf[ERRBUFLEN];

	origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	/* 'thename' is the name the pool will be imported under */
	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		zcmd_write_src_nvlist(hdl, &zc, props);
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	zc.zc_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);

	zcmd_write_conf_nvlist(hdl, &zc, config);
	zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2);

	zc.zc_cookie = flags;
	/* Retry the ioctl with a larger dst buffer while it reports ENOMEM */
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM)
		zcmd_expand_dst_nvlist(hdl, &zc);
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_load_policy(config, &policy);

	if (error) {
		char desc[1024];
		char aux[256];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				memset(buf, 0, 2048);
				zpool_collect_unsup_feat(nv, buf, 2048);
				(void) printf("%s", buf);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EREMOTEIO:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
				const char *hostname = "<unknown>";
				uint64_t hostid = 0;
				mmp_state_t mmp_state;

				mmp_state = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_MMP_STATE);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTNAME))
					hostname = fnvlist_lookup_string(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTNAME);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTID))
					hostid = fnvlist_lookup_uint64(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTID);

				/*
				 * NOTE(review): if mmp_state is neither
				 * ACTIVE nor NO_HOSTID, 'aux' is passed to
				 * zfs_error_aux() uninitialized — presumably
				 * the kernel only returns EREMOTEIO in those
				 * two states; confirm.
				 */
				if (mmp_state == MMP_STATE_ACTIVE) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool is imp"
					    "orted on host '%s' (hostid=%lx).\n"
					    "Export the pool on the other "
					    "system, then run 'zpool import'."),
					    hostname, (unsigned long) hostid);
				} else if (mmp_state == MMP_STATE_NO_HOSTID) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool has "
					    "the multihost property on and "
					    "the\nsystem's hostid is not set. "
					    "Set a unique system hostid with "
					    "the zgenhostid(8) command.\n"));
				}

				(void) zfs_error_aux(hdl, "%s", aux);
			}
			(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing or "
				    "corrupted, use '-m' to import the pool "
				    "anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;
		case ENAMETOOLONG:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
			break;
		default:
			(void) zpool_standard_error(hdl, error, desc);
			memset(buf, 0, 2048);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv,
			    buf, 2048);
			(void) printf("\t%s", buf);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
 */
		/*
		 * Re-open the pool to verify the import, then report any
		 * rewind activity that the import policy requested.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zlp_rewind &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
	}

	return (ret);
}

/*
 * Translate vdev names to guids. If a vdev_path is determined to be
 * unsuitable then a vd_errlist is allocated and the vdev path and errno
 * are added to it.
 *
 * On success, 'vdev_guids' maps each vdev path to its guid and
 * 'guids_to_paths' maps the guid (as a decimal string) back to the path.
 * Returns 0, or EINVAL with '*vd_errlist' populated (caller frees) when
 * any path is missing, a spare, or a cache device.
 */
static int
zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
    nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
{
	nvlist_t *errlist = NULL;
	int error = 0;

	for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(vds, elem)) {
		boolean_t spare, cache;

		const char *vd_path = nvpair_name(elem);
		nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
		    NULL);

		/* Spares and cache devices cannot be targets here. */
		if ((tgt == NULL) || cache || spare) {
			/* Allocate the error list lazily, on first error. */
			if (errlist == NULL) {
				errlist = fnvlist_alloc();
				error = EINVAL;
			}

			uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
			    (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
			fnvlist_add_int64(errlist, vd_path, err);
			continue;
		}

		uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
		fnvlist_add_uint64(vdev_guids, vd_path, guid);

		/* Record the reverse mapping keyed by the guid in decimal. */
		char msg[MAXNAMELEN];
		(void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
		fnvlist_add_string(guids_to_paths, msg, vd_path);
	}

	if (error != 0) {
		verify(errlist != NULL);
		/* Hand the list to the caller, or drop it if unwanted. */
		if (vd_errlist != NULL)
			*vd_errlist = errlist;
		else
			fnvlist_free(errlist);
	}

	return (error);
}

/*
 * Map an errno from an initialize operation to its libzfs
 * EZFS_* equivalent; unknown values pass through unchanged.
 */
static int
xlate_init_err(int err)
{
	switch (err) {
	case ENODEV:
		return (EZFS_NODEVICE);
	case EINVAL:
	case EROFS:
		return (EZFS_BADDEV);
	case EBUSY:
		return (EZFS_INITIALIZING);
	case ESRCH:
		return (EZFS_NO_INITIALIZE);
	}
	return (err);
}

/*
 * Begin, suspend, cancel, or uninit (clear) the initialization (initializing
 * of all free blocks) for the given vdevs in the given pool.
 */
/*
 * Common implementation for zpool_initialize() and zpool_initialize_wait():
 * resolves vdev names to guids, issues the initialize request via
 * lzc_initialize(), and, when 'wait' is set, blocks until each vdev's
 * initialization completes.  Returns 0 on success, -1 (with errors already
 * reported through the libzfs handle) on failure.
 */
static int
zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
    nvlist_t *vds, boolean_t wait)
{
	int err;

	nvlist_t *vdev_guids = fnvlist_alloc();
	nvlist_t *guids_to_paths = fnvlist_alloc();
	nvlist_t *vd_errlist = NULL;
	/*
	 * NOTE(review): 'errlist' is read only after lzc_initialize() fails;
	 * this relies on lzc_initialize() setting *errlist on every failure
	 * path — confirm against libzfs_core.
	 */
	nvlist_t *errlist;
	nvpair_t *elem;

	err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
	    guids_to_paths, &vd_errlist);

	if (err != 0) {
		verify(vd_errlist != NULL);
		goto list_errors;
	}

	err = lzc_initialize(zhp->zpool_name, cmd_type,
	    vdev_guids, &errlist);

	if (err != 0) {
		/* Per-vdev failures are reported individually below. */
		if (errlist != NULL && nvlist_lookup_nvlist(errlist,
		    ZPOOL_INITIALIZE_VDEVS, &vd_errlist) == 0) {
			goto list_errors;
		}

		if (err == EINVAL && cmd_type == POOL_INITIALIZE_UNINIT) {
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "uninitialize is not supported by kernel"));
		}

		(void) zpool_standard_error(zhp->zpool_hdl, err,
		    dgettext(TEXT_DOMAIN, "operation failed"));
		goto out;
	}

	if (wait) {
		/* Block until every requested vdev finishes initializing. */
		for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
		    elem = nvlist_next_nvpair(vdev_guids, elem)) {

			uint64_t guid = fnvpair_value_uint64(elem);

			err = lzc_wait_tag(zhp->zpool_name,
			    ZPOOL_WAIT_INITIALIZE, guid, NULL);
			if (err != 0) {
				(void) zpool_standard_error_fmt(zhp->zpool_hdl,
				    err, dgettext(TEXT_DOMAIN, "error "
				    "waiting for '%s' to initialize"),
				    nvpair_name(elem));

				goto out;
			}
		}
	}
	goto out;

list_errors:
	/* Report each failed vdev, preferring the path over the raw guid. */
	for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(vd_errlist, elem)) {
		int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
		const char *path;

		if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
		    &path) != 0)
			path = nvpair_name(elem);

		(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
		    "cannot initialize '%s'", path);
	}

out:
	fnvlist_free(vdev_guids);
	fnvlist_free(guids_to_paths);

	if (vd_errlist != NULL)
		fnvlist_free(vd_errlist);

	return (err == 0 ? 0 : -1);
}

/* Start/suspend/cancel initialization without waiting for completion. */
int
zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
    nvlist_t *vds)
{
	return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
}

/* Same as zpool_initialize(), but wait for the operation to complete. */
int
zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
    nvlist_t *vds)
{
	return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
}

/*
 * Map an errno from a TRIM operation to its libzfs EZFS_* equivalent;
 * unknown values pass through unchanged.
 */
static int
xlate_trim_err(int err)
{
	switch (err) {
	case ENODEV:
		return (EZFS_NODEVICE);
	case EINVAL:
	case EROFS:
		return (EZFS_BADDEV);
	case EBUSY:
		return (EZFS_TRIMMING);
	case ESRCH:
		return (EZFS_NO_TRIM);
	case EOPNOTSUPP:
		return (EZFS_TRIM_NOTSUP);
	}
	return (err);
}

/*
 * Block until every vdev in 'vdev_guids' finishes its TRIM.
 * Returns 0, or the first lzc_wait_tag() error (already reported).
 */
static int
zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
{
	int err;
	nvpair_t *elem;

	for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(vdev_guids, elem)) {

		uint64_t guid = fnvpair_value_uint64(elem);

		err = lzc_wait_tag(zhp->zpool_name,
		    ZPOOL_WAIT_TRIM, guid, NULL);
		if (err != 0) {
			(void) zpool_standard_error_fmt(zhp->zpool_hdl,
			    err, dgettext(TEXT_DOMAIN, "error "
			    "waiting to trim '%s'"), nvpair_name(elem));

			return (err);
		}
	}
	return (0);
}

/*
 * Check errlist and report any errors, omitting ones which should be
 * suppressed. Returns B_TRUE if any errors were reported.
 */
static boolean_t
check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
    nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
{
	nvpair_t *elem;
	boolean_t reported_errs = B_FALSE;
	int num_vds = 0;
	int num_suppressed_errs = 0;

	/* Count the vdevs that were requested, for the all-suppressed check. */
	for (elem = nvlist_next_nvpair(vds, NULL);
	    elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {
		num_vds++;
	}

	for (elem = nvlist_next_nvpair(errlist, NULL);
	    elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
		int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
		const char *path;

		/*
		 * If only the pool was specified, and it was not a secure
		 * trim then suppress warnings for individual vdevs which
		 * do not support trimming.
		 */
		if (vd_error == EZFS_TRIM_NOTSUP &&
		    trim_flags->fullpool &&
		    !trim_flags->secure) {
			num_suppressed_errs++;
			continue;
		}

		reported_errs = B_TRUE;
		/* Prefer the vdev path over the raw guid in the message. */
		if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
		    &path) != 0)
			path = nvpair_name(elem);

		(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
		    "cannot trim '%s'", path);
	}

	/*
	 * Every vdev was suppressed: nothing in the pool supports TRIM.
	 * NOTE(review): an empty 'vds' also satisfies 0 == 0 here — confirm
	 * callers never pass an empty vdev list.
	 */
	if (num_suppressed_errs == num_vds) {
		(void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
		    "no devices in pool support trim operations"));
		(void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
		    dgettext(TEXT_DOMAIN, "cannot trim")));
		reported_errs = B_TRUE;
	}

	return (reported_errs);
}

/*
 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
 * the given vdevs in the given pool.
 */
int
zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
    trimflags_t *trim_flags)
{
	int err;
	int retval = 0;

	nvlist_t *vdev_guids = fnvlist_alloc();
	nvlist_t *guids_to_paths = fnvlist_alloc();
	nvlist_t *errlist = NULL;

	err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
	    guids_to_paths, &errlist);
	if (err != 0) {
		check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
		retval = -1;
		goto out;
	}

	/* errlist is NULL here; lzc_trim() repopulates it on failure. */
	err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
	    trim_flags->secure, vdev_guids, &errlist);
	if (err != 0) {
		nvlist_t *vd_errlist;
		if (errlist != NULL && nvlist_lookup_nvlist(errlist,
		    ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
			/* Per-vdev errors; may all be suppressed. */
			if (check_trim_errs(zhp, trim_flags, guids_to_paths,
			    vds, vd_errlist)) {
				retval = -1;
				goto out;
			}
		} else {
			char errbuf[ERRBUFLEN];

			(void) snprintf(errbuf, sizeof (errbuf),
			    dgettext(TEXT_DOMAIN, "operation failed"));
			zpool_standard_error(zhp->zpool_hdl, err, errbuf);
			retval = -1;
			goto out;
		}
	}


	if (trim_flags->wait)
		retval = zpool_trim_wait(zhp, vdev_guids);

out:
	if (errlist != NULL)
		fnvlist_free(errlist);
	fnvlist_free(vdev_guids);
	fnvlist_free(guids_to_paths);
	return (retval);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
	char errbuf[ERRBUFLEN];
	int err;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	nvlist_t *args = fnvlist_alloc();
	fnvlist_add_uint64(args, "scan_type", (uint64_t)func);
	fnvlist_add_uint64(args, "scan_command", (uint64_t)cmd);

	err = lzc_scrub(ZFS_IOC_POOL_SCRUB, zhp->zpool_name, args, NULL);
	fnvlist_free(args);

	if (err == 0) {
		return (0);
	} else if (err == ZFS_ERR_IOC_CMD_UNAVAIL) {
		/*
		 * Fall back to the legacy scan ioctl for kernels that do
		 * not support ZFS_IOC_POOL_SCRUB.
		 */
		zfs_cmd_t zc = {"\0"};
		(void) strlcpy(zc.zc_name, zhp->zpool_name,
		    sizeof (zc.zc_name));
		zc.zc_cookie = func;
		zc.zc_flags = cmd;

		if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
			return (0);
	}

	/*
	 * An ECANCELED on a scrub means one of the following:
	 * 1. we resumed a paused scrub.
	 * 2. we resumed a paused error scrub.
	 * 3. Error scrub is not run because of no error log.
	 */
	if (err == ECANCELED && (func == POOL_SCAN_SCRUB ||
	    func == POOL_SCAN_ERRORSCRUB) && cmd == POOL_SCRUB_NORMAL)
		return (0);
	/*
	 * The following cases have been handled here:
	 * 1. Paused a scrub/error scrub if there is none in progress.
	 */
	if (err == ENOENT && func != POOL_SCAN_NONE && cmd ==
	    POOL_SCRUB_PAUSE) {
		return (0);
	}

	ASSERT3U(func, >=, POOL_SCAN_NONE);
	ASSERT3U(func, <, POOL_SCAN_FUNCS);

	/* Build the operation-specific error prefix. */
	if (func == POOL_SCAN_SCRUB || func == POOL_SCAN_ERRORSCRUB) {
		if (cmd == POOL_SCRUB_PAUSE) {
			(void) snprintf(errbuf, sizeof (errbuf),
			    dgettext(TEXT_DOMAIN, "cannot pause scrubbing %s"),
			    zhp->zpool_name);
		} else {
			assert(cmd == POOL_SCRUB_NORMAL);
			(void) snprintf(errbuf, sizeof (errbuf),
			    dgettext(TEXT_DOMAIN, "cannot scrub %s"),
			    zhp->zpool_name);
		}
	} else if (func == POOL_SCAN_RESILVER) {
		assert(cmd == POOL_SCRUB_NORMAL);
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot restart resilver on %s"), zhp->zpool_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot cancel scrubbing %s"), zhp->zpool_name);
	} else {
		assert(!"unexpected result");
	}

	/*
	 * With EBUSY, six cases are possible:
	 *
	 * Current state		Requested
	 * 1. Normal Scrub Running	Normal Scrub or Error Scrub
	 * 2. Normal Scrub Paused	Error Scrub
	 * 3. Normal Scrub Paused	Pause Normal Scrub
	 * 4. Error Scrub Running	Normal Scrub or Error Scrub
	 * 5. Error Scrub Paused	Pause Error Scrub
	 * 6. Resilvering		Anything else
	 */
	if (err == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		/* Inspect the scan stats to distinguish the cases above. */
		nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
		    ps->pss_state == DSS_SCANNING) {
			if (ps->pss_pass_scrub_pause == 0) {
				/* handles case 1 */
				assert(cmd == POOL_SCRUB_NORMAL);
				return (zfs_error(hdl, EZFS_SCRUBBING,
				    errbuf));
			} else {
				if (func == POOL_SCAN_ERRORSCRUB) {
					/* handles case 2 */
					ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
					return (zfs_error(hdl,
					    EZFS_SCRUB_PAUSED_TO_CANCEL,
					    errbuf));
				} else {
					/* handles case 3 */
					ASSERT3U(func, ==, POOL_SCAN_SCRUB);
					ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
					return (zfs_error(hdl,
					    EZFS_SCRUB_PAUSED, errbuf));
				}
			}
		} else if (ps &&
		    ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
		    ps->pss_error_scrub_state == DSS_ERRORSCRUBBING) {
			if (ps->pss_pass_error_scrub_pause == 0) {
				/* handles case 4 */
				ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
				return (zfs_error(hdl, EZFS_ERRORSCRUBBING,
				    errbuf));
			} else {
				/* handles case 5 */
				ASSERT3U(func, ==, POOL_SCAN_ERRORSCRUB);
				ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
				return (zfs_error(hdl, EZFS_ERRORSCRUB_PAUSED,
				    errbuf));
			}
		} else {
			/* handles case 6 */
			return (zfs_error(hdl, EZFS_RESILVERING, errbuf));
		}
	} else if (err == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, errbuf));
	} else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
		return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, errbuf));
	} else {
		return (zpool_standard_error(hdl, err, errbuf));
	}
}

/*
 * Find a vdev that matches the search criteria specified. 
We use the
 * the nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if its an INUSE spare.
 *
 * If 'return_parent' is set, then return the *parent* of the vdev you're
 * searching for rather than the vdev itself.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	const char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);
	const char *tmp = NULL;
	boolean_t is_root;

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	/*
	 * NOTE(review): the lookup result is not checked before strcmp();
	 * assumes every vdev nvlist carries ZPOOL_CONFIG_TYPE — confirm.
	 */
	nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &tmp);
	if (strcmp(tmp, "root") == 0)
		is_root = B_TRUE;
	else
		is_root = B_FALSE;

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		/* Search by guid. */
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval = fnvpair_value_uint64(pair);
			uint64_t theguid = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_GUID);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		const char *srchval, *val;

		srchval = fnvpair_value_string(pair);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
		 *   "-part1", or "p1".  The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
		 *   is used to check all possible expanded paths.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
				return (nv);

		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(zpool_vdev_is_interior(type));

			id = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID);
			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			/*
			 * If we are looking for a raidz and a parity is
			 * specified, make sure it matches.
			 */
			int rzlen = strlen(VDEV_TYPE_RAIDZ);
			assert(rzlen == strlen(VDEV_TYPE_DRAID));
			int typlen = strlen(type);
			if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 ||
			    strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) &&
			    typlen != rzlen) {
				uint64_t vdev_parity;
				int parity = *(type + rzlen) - '0';

				if (parity <= 0 || parity > 3 ||
				    (typlen - rzlen) != 1) {
					/*
					 * Nonsense parity specified, can
					 * never match
					 */
					free(type);
					return (NULL);
				}
				vdev_parity = fnvlist_lookup_uint64(nv,
				    ZPOOL_CONFIG_NPARITY);
				if ((int)vdev_parity != parity) {
					free(type);
					break;
				}
			}

			free(type);
			/* strtoull() failed to parse the vdev id. */
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	/* No match here; recurse into the children. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL, return_parent)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret && return_parent && !is_root ? nv : ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL, return_parent))
			    != NULL) {
				*avail_spare = B_TRUE;
				return (ret && return_parent &&
				    !is_root ? nv : ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL, return_parent))
			    != NULL) {
				*l2cache = B_TRUE;
				return (ret && return_parent &&
				    !is_root ? nv : ret);
			}
		}
	}

	return (NULL);
}

/*
 * Given a physical path or guid, find the associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;
	uint64_t guid;
	char *end;

	search = fnvlist_alloc();

	/* A string that parses fully as a nonzero number is a guid. */
	guid = strtoull(ppath, &end, 0);
	if (guid != 0 && *end == '\0') {
		fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
	} else {
		fnvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath);
	}

	nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
	    ZPOOL_CONFIG_VDEV_TREE);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
	    B_FALSE);
	fnvlist_free(search);

	return (ret);
}

/*
 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
3115 */ 3116 static boolean_t 3117 zpool_vdev_is_interior(const char *name) 3118 { 3119 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 3120 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 || 3121 strncmp(name, 3122 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 || 3123 strncmp(name, VDEV_TYPE_ROOT, strlen(VDEV_TYPE_ROOT)) == 0 || 3124 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 3125 return (B_TRUE); 3126 3127 if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 && 3128 !zpool_is_draid_spare(name)) 3129 return (B_TRUE); 3130 3131 return (B_FALSE); 3132 } 3133 3134 /* 3135 * Lookup the nvlist for a given vdev or vdev's parent (depending on 3136 * if 'return_parent' is set). 3137 */ 3138 static nvlist_t * 3139 __zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 3140 boolean_t *l2cache, boolean_t *log, boolean_t return_parent) 3141 { 3142 char *end; 3143 nvlist_t *nvroot, *search, *ret; 3144 uint64_t guid; 3145 boolean_t __avail_spare, __l2cache, __log; 3146 3147 search = fnvlist_alloc(); 3148 3149 guid = strtoull(path, &end, 0); 3150 if (guid != 0 && *end == '\0') { 3151 fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid); 3152 } else if (zpool_vdev_is_interior(path)) { 3153 fnvlist_add_string(search, ZPOOL_CONFIG_TYPE, path); 3154 } else { 3155 fnvlist_add_string(search, ZPOOL_CONFIG_PATH, path); 3156 } 3157 3158 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config, 3159 ZPOOL_CONFIG_VDEV_TREE); 3160 3161 /* 3162 * User can pass NULL for avail_spare, l2cache, and log, but 3163 * we still need to provide variables to vdev_to_nvlist_iter(), so 3164 * just point them to junk variables here. 
3165 */ 3166 if (!avail_spare) 3167 avail_spare = &__avail_spare; 3168 if (!l2cache) 3169 l2cache = &__l2cache; 3170 if (!log) 3171 log = &__log; 3172 3173 *avail_spare = B_FALSE; 3174 *l2cache = B_FALSE; 3175 if (log != NULL) 3176 *log = B_FALSE; 3177 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log, 3178 return_parent); 3179 fnvlist_free(search); 3180 3181 return (ret); 3182 } 3183 3184 nvlist_t * 3185 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 3186 boolean_t *l2cache, boolean_t *log) 3187 { 3188 return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log, 3189 B_FALSE)); 3190 } 3191 3192 /* Given a vdev path, return its parent's nvlist */ 3193 nvlist_t * 3194 zpool_find_parent_vdev(zpool_handle_t *zhp, const char *path, 3195 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 3196 { 3197 return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log, 3198 B_TRUE)); 3199 } 3200 3201 /* 3202 * Convert a vdev path to a GUID. Returns GUID or 0 on error. 3203 * 3204 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it 3205 * if the VDEV is a spare, l2cache, or log device. If they're NULL then 3206 * ignore them. 3207 */ 3208 static uint64_t 3209 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path, 3210 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log) 3211 { 3212 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE; 3213 nvlist_t *tgt; 3214 3215 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache, 3216 &log)) == NULL) 3217 return (0); 3218 3219 if (is_spare != NULL) 3220 *is_spare = spare; 3221 if (is_l2cache != NULL) 3222 *is_l2cache = l2cache; 3223 if (is_log != NULL) 3224 *is_log = log; 3225 3226 return (fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID)); 3227 } 3228 3229 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. 
*/
uint64_t
zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
{
	return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
}

/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.  On success, '*newstate' receives the resulting
 * vdev state.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));

	zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);

	/* Spares can only be onlined when explicitly requested. */
	if (!(flags & ZFS_ONLINE_SPARE) && avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, errbuf));

#ifndef __FreeBSD__
	const char *pathname;
	/*
	 * Whole-disk vdevs may need a relabel before expansion, either when
	 * explicitly requested or when autoexpand is on.
	 */
	if ((flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
	    nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf));
		}

		if (wholedisk) {
			const char *fullpath = path;
			char buf[MAXPATHLEN];
			int error;

			/* Expand short device names to a full path. */
			if (path[0] != '/') {
				error = zfs_resolve_shortname(path, buf,
				    sizeof (buf));
				if (error != 0)
					return (zfs_error(hdl, EZFS_NODEVICE,
					    errbuf));

				fullpath = buf;
			}

			error = zpool_relabel_disk(hdl, fullpath, errbuf);
			if (error != 0)
				return (error);
		}
	}
#endif

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one.  Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, errbuf));
		}
		return (zpool_standard_error(hdl, errno, errbuf));
	}

	/* The kernel reports the resulting state back in zc_cookie. */
	*newstate = zc.zc_cookie;
	return (0);
}

/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));

	zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, errbuf));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? 
ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, errbuf));

	default:
		return (zpool_standard_error(hdl, errno, errbuf));
	}
}

/*
 * Remove the specified vdev asynchronously from the configuration, so
 * that it may come ONLINE if reinserted. This is called from zed on
 * Udev remove event.
 * Note: We also have a similar function zpool_vdev_remove() that
 * removes the vdev from the pool.
 */
int
zpool_vdev_remove_wanted(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));

	zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);

	zc.zc_cookie = VDEV_STATE_REMOVED;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, errbuf));
}

/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	zc.zc_obj = aux;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));

	default:
		return (zpool_standard_error(hdl, errno, errbuf));
	}

}

/*
 * Generic set vdev state function
 */
static int
zpool_vdev_set_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux,
    vdev_state_t state)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set %s %llu"),
	    zpool_state_to_name(state, aux), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = state;
	zc.zc_obj = aux;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, errbuf));
}

/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_DEGRADED));
}

/*
 * Mark the given vdev as in a removed state (as if the device does not exist).
 *
 * This is different than zpool_vdev_remove() which does a removal of a device
 * from the pool (but the device does exist).
 */
int
zpool_vdev_set_removed_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_REMOVED));
}

/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.  'which' selects which of the (two) children of a spare or
 * draid-spare vdev must match 'tgt'.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		const char *type = fnvlist_lookup_string(search,
		    ZPOOL_CONFIG_TYPE);
		if ((strcmp(type, VDEV_TYPE_SPARE) == 0 ||
		    strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		/* Recurse into the subtree looking for a matching spare. */
		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
3522 */ 3523 int 3524 zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk, 3525 const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild) 3526 { 3527 zfs_cmd_t zc = {"\0"}; 3528 char errbuf[ERRBUFLEN]; 3529 int ret; 3530 nvlist_t *tgt; 3531 boolean_t avail_spare, l2cache, islog; 3532 uint64_t val; 3533 char *newname; 3534 const char *type; 3535 nvlist_t **child; 3536 uint_t children; 3537 nvlist_t *config_root; 3538 libzfs_handle_t *hdl = zhp->zpool_hdl; 3539 3540 if (replacing) 3541 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 3542 "cannot replace %s with %s"), old_disk, new_disk); 3543 else 3544 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 3545 "cannot attach %s to %s"), new_disk, old_disk); 3546 3547 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3548 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 3549 &islog)) == NULL) 3550 return (zfs_error(hdl, EZFS_NODEVICE, errbuf)); 3551 3552 if (avail_spare) 3553 return (zfs_error(hdl, EZFS_ISSPARE, errbuf)); 3554 3555 if (l2cache) 3556 return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf)); 3557 3558 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 3559 zc.zc_cookie = replacing; 3560 zc.zc_simple = rebuild; 3561 3562 if (rebuild && 3563 zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) { 3564 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3565 "the loaded zfs module doesn't support device rebuilds")); 3566 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3567 } 3568 3569 type = fnvlist_lookup_string(tgt, ZPOOL_CONFIG_TYPE); 3570 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 && 3571 zfeature_lookup_guid("org.openzfs:raidz_expansion", NULL) != 0) { 3572 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3573 "the loaded zfs module doesn't support raidz expansion")); 3574 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3575 } 3576 3577 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3578 &child, &children) != 0 || 
children != 1) { 3579 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3580 "new device must be a single disk")); 3581 return (zfs_error(hdl, EZFS_INVALCONFIG, errbuf)); 3582 } 3583 3584 config_root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 3585 ZPOOL_CONFIG_VDEV_TREE); 3586 3587 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL) 3588 return (-1); 3589 3590 /* 3591 * If the target is a hot spare that has been swapped in, we can only 3592 * replace it with another hot spare. 3593 */ 3594 if (replacing && 3595 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 3596 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 3597 NULL) == NULL || !avail_spare) && 3598 is_replacing_spare(config_root, tgt, 1)) { 3599 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3600 "can only be replaced by another hot spare")); 3601 free(newname); 3602 return (zfs_error(hdl, EZFS_BADTARGET, errbuf)); 3603 } 3604 3605 free(newname); 3606 3607 zcmd_write_conf_nvlist(hdl, &zc, nvroot); 3608 3609 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 3610 3611 zcmd_free_nvlists(&zc); 3612 3613 if (ret == 0) 3614 return (0); 3615 3616 switch (errno) { 3617 case ENOTSUP: 3618 /* 3619 * Can't attach to or replace this type of vdev. 
3620 */ 3621 if (replacing) { 3622 uint64_t version = zpool_get_prop_int(zhp, 3623 ZPOOL_PROP_VERSION, NULL); 3624 3625 if (islog) { 3626 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3627 "cannot replace a log with a spare")); 3628 } else if (rebuild) { 3629 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3630 "only mirror and dRAID vdevs support " 3631 "sequential reconstruction")); 3632 } else if (zpool_is_draid_spare(new_disk)) { 3633 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3634 "dRAID spares can only replace child " 3635 "devices in their parent's dRAID vdev")); 3636 } else if (version >= SPA_VERSION_MULTI_REPLACE) { 3637 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3638 "already in replacing/spare config; wait " 3639 "for completion or use 'zpool detach'")); 3640 } else { 3641 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3642 "cannot replace a replacing device")); 3643 } 3644 } else if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) { 3645 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3646 "raidz_expansion feature must be enabled " 3647 "in order to attach a device to raidz")); 3648 } else { 3649 char status[64] = {0}; 3650 zpool_prop_get_feature(zhp, 3651 "feature@device_rebuild", status, 63); 3652 if (rebuild && 3653 strncmp(status, ZFS_FEATURE_DISABLED, 64) == 0) { 3654 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3655 "device_rebuild feature must be enabled " 3656 "in order to use sequential " 3657 "reconstruction")); 3658 } else { 3659 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3660 "can only attach to mirrors and top-level " 3661 "disks")); 3662 } 3663 } 3664 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf); 3665 break; 3666 3667 case EINVAL: 3668 /* 3669 * The new device must be a single disk. 
3670 */ 3671 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3672 "new device must be a single disk")); 3673 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 3674 break; 3675 3676 case EBUSY: 3677 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 3678 new_disk); 3679 (void) zfs_error(hdl, EZFS_BADDEV, errbuf); 3680 break; 3681 3682 case EOVERFLOW: 3683 /* 3684 * The new device is too small. 3685 */ 3686 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3687 "device is too small")); 3688 (void) zfs_error(hdl, EZFS_BADDEV, errbuf); 3689 break; 3690 3691 case EDOM: 3692 /* 3693 * The new device has a different optimal sector size. 3694 */ 3695 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3696 "new device has a different optimal sector size; use the " 3697 "option '-o ashift=N' to override the optimal size")); 3698 (void) zfs_error(hdl, EZFS_BADDEV, errbuf); 3699 break; 3700 3701 case ENAMETOOLONG: 3702 /* 3703 * The resulting top-level vdev spec won't fit in the label. 3704 */ 3705 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf); 3706 break; 3707 3708 case ENXIO: 3709 /* 3710 * The existing raidz vdev has offline children 3711 */ 3712 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) { 3713 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3714 "raidz vdev has devices that are are offline or " 3715 "being replaced")); 3716 (void) zfs_error(hdl, EZFS_BADDEV, errbuf); 3717 break; 3718 } else { 3719 (void) zpool_standard_error(hdl, errno, errbuf); 3720 } 3721 break; 3722 3723 case EADDRINUSE: 3724 /* 3725 * The boot reserved area is already being used (FreeBSD) 3726 */ 3727 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) { 3728 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3729 "the reserved boot area needed for the expansion " 3730 "is already being used by a boot loader")); 3731 (void) zfs_error(hdl, EZFS_BADDEV, errbuf); 3732 } else { 3733 (void) zpool_standard_error(hdl, errno, errbuf); 3734 } 3735 break; 3736 3737 case ZFS_ERR_ASHIFT_MISMATCH: 3738 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3739 "The new device 
cannot have a higher alignment requirement " 3740 "than the top-level vdev.")); 3741 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf); 3742 break; 3743 default: 3744 (void) zpool_standard_error(hdl, errno, errbuf); 3745 } 3746 3747 return (-1); 3748 } 3749 3750 /* 3751 * Detach the specified device. 3752 */ 3753 int 3754 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 3755 { 3756 zfs_cmd_t zc = {"\0"}; 3757 char errbuf[ERRBUFLEN]; 3758 nvlist_t *tgt; 3759 boolean_t avail_spare, l2cache; 3760 libzfs_handle_t *hdl = zhp->zpool_hdl; 3761 3762 (void) snprintf(errbuf, sizeof (errbuf), 3763 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 3764 3765 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3766 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3767 NULL)) == NULL) 3768 return (zfs_error(hdl, EZFS_NODEVICE, errbuf)); 3769 3770 if (avail_spare) 3771 return (zfs_error(hdl, EZFS_ISSPARE, errbuf)); 3772 3773 if (l2cache) 3774 return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf)); 3775 3776 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 3777 3778 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 3779 return (0); 3780 3781 switch (errno) { 3782 3783 case ENOTSUP: 3784 /* 3785 * Can't detach from this type of vdev. 3786 */ 3787 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 3788 "applicable to mirror and replacing vdevs")); 3789 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf); 3790 break; 3791 3792 case EBUSY: 3793 /* 3794 * There are no other replicas of this device. 3795 */ 3796 (void) zfs_error(hdl, EZFS_NOREPLICAS, errbuf); 3797 break; 3798 3799 default: 3800 (void) zpool_standard_error(hdl, errno, errbuf); 3801 } 3802 3803 return (-1); 3804 } 3805 3806 /* 3807 * Find a mirror vdev in the source nvlist. 3808 * 3809 * The mchild array contains a list of disks in one of the top-level mirrors 3810 * of the source pool. The schild array contains a list of disks that the 3811 * user specified on the command line. 
We loop over the mchild array to 3812 * see if any entry in the schild array matches. 3813 * 3814 * If a disk in the mchild array is found in the schild array, we return 3815 * the index of that entry. Otherwise we return -1. 3816 */ 3817 static int 3818 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 3819 nvlist_t **schild, uint_t schildren) 3820 { 3821 uint_t mc; 3822 3823 for (mc = 0; mc < mchildren; mc++) { 3824 uint_t sc; 3825 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 3826 mchild[mc], 0); 3827 3828 for (sc = 0; sc < schildren; sc++) { 3829 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 3830 schild[sc], 0); 3831 boolean_t result = (strcmp(mpath, spath) == 0); 3832 3833 free(spath); 3834 if (result) { 3835 free(mpath); 3836 return (mc); 3837 } 3838 } 3839 3840 free(mpath); 3841 } 3842 3843 return (-1); 3844 } 3845 3846 /* 3847 * Split a mirror pool. If newroot points to null, then a new nvlist 3848 * is generated and it is the responsibility of the caller to free it. 
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	const char *bias;
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers, readonly = B_FALSE;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	tree = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
	vers = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);

	if (props) {
		/* NOTE: this local 'flags' intentionally shadows the
		 * splitflags_t parameter; it only scopes proplist checks. */
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, errbuf)) == NULL)
			return (-1);
		(void) nvlist_lookup_uint64(zc_props,
		    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
		if (readonly) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property %s can only be set at import time"),
			    zpool_prop_to_name(ZPOOL_PROP_READONLY));
			return (-1);
		}
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	/* varray collects the vdevs that will form the new pool's tree. */
	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	/* Caller may pre-populate *newroot with the user's disk list. */
	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		boolean_t is_special = B_FALSE, is_dedup = B_FALSE;
		const char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			/* Remember where trailing holes start so they can
			 * be trimmed from the new tree below. */
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		type = fnvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE);

		if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
			vdev = child[c];
			if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
				goto out;
			continue;
		} else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
			goto out;
		}

		if (nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {
			if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
				is_special = B_TRUE;
			else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
				is_dedup = B_TRUE;
		}
		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;

		/* Preserve allocation-class bias in dry-run output. */
		if (flags.dryrun != 0) {
			if (is_dedup == B_TRUE) {
				if (nvlist_add_string(varray[vcount - 1],
				    ZPOOL_CONFIG_ALLOCATION_BIAS,
				    VDEV_ALLOC_BIAS_DEDUP) != 0)
					goto out;
			} else if (is_special == B_TRUE) {
				if (nvlist_add_string(varray[vcount - 1],
				    ZPOOL_CONFIG_ALLOCATION_BIAS,
				    VDEV_ALLOC_BIAS_SPECIAL) != 0)
					goto out;
			}
		}
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    (const nvlist_t **)varray, lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	zcmd_write_conf_nvlist(hdl, &zc, newconfig);
	if (zc_props != NULL)
		zcmd_write_src_nvlist(hdl, &zc, zc_props);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, errbuf);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}

/*
 * Remove the given device.
4091 */ 4092 int 4093 zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 4094 { 4095 zfs_cmd_t zc = {"\0"}; 4096 char errbuf[ERRBUFLEN]; 4097 nvlist_t *tgt; 4098 boolean_t avail_spare, l2cache, islog; 4099 libzfs_handle_t *hdl = zhp->zpool_hdl; 4100 uint64_t version; 4101 4102 (void) snprintf(errbuf, sizeof (errbuf), 4103 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 4104 4105 if (zpool_is_draid_spare(path)) { 4106 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4107 "dRAID spares cannot be removed")); 4108 return (zfs_error(hdl, EZFS_NODEVICE, errbuf)); 4109 } 4110 4111 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4112 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 4113 &islog)) == NULL) 4114 return (zfs_error(hdl, EZFS_NODEVICE, errbuf)); 4115 4116 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 4117 if (islog && version < SPA_VERSION_HOLES) { 4118 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4119 "pool must be upgraded to support log removal")); 4120 return (zfs_error(hdl, EZFS_BADVERSION, errbuf)); 4121 } 4122 4123 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 4124 4125 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 4126 return (0); 4127 4128 switch (errno) { 4129 4130 case EALREADY: 4131 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4132 "removal for this vdev is already in progress.")); 4133 (void) zfs_error(hdl, EZFS_BUSY, errbuf); 4134 break; 4135 4136 case EINVAL: 4137 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4138 "invalid config; all top-level vdevs must " 4139 "have the same sector size and not be raidz.")); 4140 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4141 break; 4142 4143 case EBUSY: 4144 if (islog) { 4145 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4146 "Mount encrypted datasets to replay logs.")); 4147 } else { 4148 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4149 "Pool busy; removal may already be in progress")); 4150 } 4151 (void) zfs_error(hdl, EZFS_BUSY, errbuf); 4152 break; 4153 
4154 case EACCES: 4155 if (islog) { 4156 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4157 "Mount encrypted datasets to replay logs.")); 4158 (void) zfs_error(hdl, EZFS_BUSY, errbuf); 4159 } else { 4160 (void) zpool_standard_error(hdl, errno, errbuf); 4161 } 4162 break; 4163 4164 default: 4165 (void) zpool_standard_error(hdl, errno, errbuf); 4166 } 4167 return (-1); 4168 } 4169 4170 int 4171 zpool_vdev_remove_cancel(zpool_handle_t *zhp) 4172 { 4173 zfs_cmd_t zc = {{0}}; 4174 char errbuf[ERRBUFLEN]; 4175 libzfs_handle_t *hdl = zhp->zpool_hdl; 4176 4177 (void) snprintf(errbuf, sizeof (errbuf), 4178 dgettext(TEXT_DOMAIN, "cannot cancel removal")); 4179 4180 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4181 zc.zc_cookie = 1; 4182 4183 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 4184 return (0); 4185 4186 return (zpool_standard_error(hdl, errno, errbuf)); 4187 } 4188 4189 int 4190 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path, 4191 uint64_t *sizep) 4192 { 4193 char errbuf[ERRBUFLEN]; 4194 nvlist_t *tgt; 4195 boolean_t avail_spare, l2cache, islog; 4196 libzfs_handle_t *hdl = zhp->zpool_hdl; 4197 4198 (void) snprintf(errbuf, sizeof (errbuf), 4199 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"), 4200 path); 4201 4202 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 4203 &islog)) == NULL) 4204 return (zfs_error(hdl, EZFS_NODEVICE, errbuf)); 4205 4206 if (avail_spare || l2cache || islog) { 4207 *sizep = 0; 4208 return (0); 4209 } 4210 4211 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) { 4212 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4213 "indirect size not available")); 4214 return (zfs_error(hdl, EINVAL, errbuf)); 4215 } 4216 return (0); 4217 } 4218 4219 /* 4220 * Clear the errors for the pool, or the particular device if specified. 
4221 */ 4222 int 4223 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 4224 { 4225 zfs_cmd_t zc = {"\0"}; 4226 char errbuf[ERRBUFLEN]; 4227 nvlist_t *tgt; 4228 zpool_load_policy_t policy; 4229 boolean_t avail_spare, l2cache; 4230 libzfs_handle_t *hdl = zhp->zpool_hdl; 4231 nvlist_t *nvi = NULL; 4232 int error; 4233 4234 if (path) 4235 (void) snprintf(errbuf, sizeof (errbuf), 4236 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 4237 path); 4238 else 4239 (void) snprintf(errbuf, sizeof (errbuf), 4240 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 4241 zhp->zpool_name); 4242 4243 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4244 if (path) { 4245 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 4246 &l2cache, NULL)) == NULL) 4247 return (zfs_error(hdl, EZFS_NODEVICE, errbuf)); 4248 4249 /* 4250 * Don't allow error clearing for hot spares. Do allow 4251 * error clearing for l2cache devices. 4252 */ 4253 if (avail_spare) 4254 return (zfs_error(hdl, EZFS_ISSPARE, errbuf)); 4255 4256 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 4257 } 4258 4259 zpool_get_load_policy(rewindnvl, &policy); 4260 zc.zc_cookie = policy.zlp_rewind; 4261 4262 zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2); 4263 zcmd_write_src_nvlist(hdl, &zc, rewindnvl); 4264 4265 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 4266 errno == ENOMEM) 4267 zcmd_expand_dst_nvlist(hdl, &zc); 4268 4269 if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) && 4270 errno != EPERM && errno != EACCES)) { 4271 if (policy.zlp_rewind & 4272 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 4273 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 4274 zpool_rewind_exclaim(hdl, zc.zc_name, 4275 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), 4276 nvi); 4277 nvlist_free(nvi); 4278 } 4279 zcmd_free_nvlists(&zc); 4280 return (0); 4281 } 4282 4283 zcmd_free_nvlists(&zc); 4284 return (zpool_standard_error(hdl, errno, errbuf)); 4285 } 4286 4287 /* 4288 * 
Similar to zpool_clear(), but takes a GUID (used by fmd). 4289 */ 4290 int 4291 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 4292 { 4293 zfs_cmd_t zc = {"\0"}; 4294 char errbuf[ERRBUFLEN]; 4295 libzfs_handle_t *hdl = zhp->zpool_hdl; 4296 4297 (void) snprintf(errbuf, sizeof (errbuf), 4298 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 4299 (u_longlong_t)guid); 4300 4301 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4302 zc.zc_guid = guid; 4303 zc.zc_cookie = ZPOOL_NO_REWIND; 4304 4305 if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0) 4306 return (0); 4307 4308 return (zpool_standard_error(hdl, errno, errbuf)); 4309 } 4310 4311 /* 4312 * Change the GUID for a pool. 4313 * 4314 * Similar to zpool_reguid(), but may take a GUID. 4315 * 4316 * If the guid argument is NULL, then no GUID is passed in the nvlist to the 4317 * ioctl(). 4318 */ 4319 int 4320 zpool_set_guid(zpool_handle_t *zhp, const uint64_t *guid) 4321 { 4322 char errbuf[ERRBUFLEN]; 4323 libzfs_handle_t *hdl = zhp->zpool_hdl; 4324 nvlist_t *nvl = NULL; 4325 zfs_cmd_t zc = {"\0"}; 4326 int error = -1; 4327 4328 if (guid != NULL) { 4329 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 4330 return (no_memory(hdl)); 4331 4332 if (nvlist_add_uint64(nvl, ZPOOL_REGUID_GUID, *guid) != 0) { 4333 nvlist_free(nvl); 4334 return (no_memory(hdl)); 4335 } 4336 4337 zcmd_write_src_nvlist(hdl, &zc, nvl); 4338 } 4339 4340 (void) snprintf(errbuf, sizeof (errbuf), 4341 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 4342 4343 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4344 error = zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc); 4345 if (error) { 4346 return (zpool_standard_error(hdl, errno, errbuf)); 4347 } 4348 if (guid != NULL) { 4349 zcmd_free_nvlists(&zc); 4350 nvlist_free(nvl); 4351 } 4352 return (0); 4353 } 4354 4355 /* 4356 * Change the GUID for a pool. 
4357 */ 4358 int 4359 zpool_reguid(zpool_handle_t *zhp) 4360 { 4361 return (zpool_set_guid(zhp, NULL)); 4362 } 4363 4364 /* 4365 * Reopen the pool. 4366 */ 4367 int 4368 zpool_reopen_one(zpool_handle_t *zhp, void *data) 4369 { 4370 libzfs_handle_t *hdl = zpool_get_handle(zhp); 4371 const char *pool_name = zpool_get_name(zhp); 4372 boolean_t *scrub_restart = data; 4373 int error; 4374 4375 error = lzc_reopen(pool_name, *scrub_restart); 4376 if (error) { 4377 return (zpool_standard_error_fmt(hdl, error, 4378 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name)); 4379 } 4380 4381 return (0); 4382 } 4383 4384 /* call into libzfs_core to execute the sync IOCTL per pool */ 4385 int 4386 zpool_sync_one(zpool_handle_t *zhp, void *data) 4387 { 4388 int ret; 4389 libzfs_handle_t *hdl = zpool_get_handle(zhp); 4390 const char *pool_name = zpool_get_name(zhp); 4391 boolean_t *force = data; 4392 nvlist_t *innvl = fnvlist_alloc(); 4393 4394 fnvlist_add_boolean_value(innvl, "force", *force); 4395 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) { 4396 nvlist_free(innvl); 4397 return (zpool_standard_error_fmt(hdl, ret, 4398 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name)); 4399 } 4400 nvlist_free(innvl); 4401 4402 return (0); 4403 } 4404 4405 #define PATH_BUF_LEN 64 4406 4407 /* 4408 * Given a vdev, return the name to display in iostat. If the vdev has a path, 4409 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 4410 * We also check if this is a whole disk, in which case we strip off the 4411 * trailing 's0' slice name. 4412 * 4413 * This routine is also responsible for identifying when disks have been 4414 * reconfigured in a new location. The kernel will have opened the device by 4415 * devid, but the path will still refer to the old location. To catch this, we 4416 * first do a path -> devid translation (which is fast for the common case). If 4417 * the devid matches, we're done. 
If not, we do a reverse devid -> path 4418 * translation and issue the appropriate ioctl() to update the path of the vdev. 4419 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 4420 * of these checks. 4421 */ 4422 char * 4423 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 4424 int name_flags) 4425 { 4426 const char *type, *tpath; 4427 const char *path; 4428 uint64_t value; 4429 char buf[PATH_BUF_LEN]; 4430 char tmpbuf[PATH_BUF_LEN * 2]; 4431 4432 /* 4433 * vdev_name will be "root"/"root-0" for the root vdev, but it is the 4434 * zpool name that will be displayed to the user. 4435 */ 4436 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE); 4437 if (zhp != NULL && strcmp(type, "root") == 0) 4438 return (zfs_strdup(hdl, zpool_get_name(zhp))); 4439 4440 if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_PATH")) 4441 name_flags |= VDEV_NAME_PATH; 4442 if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_GUID")) 4443 name_flags |= VDEV_NAME_GUID; 4444 if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_FOLLOW_LINKS")) 4445 name_flags |= VDEV_NAME_FOLLOW_LINKS; 4446 4447 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 || 4448 name_flags & VDEV_NAME_GUID) { 4449 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value); 4450 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value); 4451 path = buf; 4452 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tpath) == 0) { 4453 path = tpath; 4454 4455 if (name_flags & VDEV_NAME_FOLLOW_LINKS) { 4456 char *rp = realpath(path, NULL); 4457 if (rp) { 4458 strlcpy(buf, rp, sizeof (buf)); 4459 path = buf; 4460 free(rp); 4461 } 4462 } 4463 4464 /* 4465 * For a block device only use the name. 4466 */ 4467 if ((strcmp(type, VDEV_TYPE_DISK) == 0) && 4468 !(name_flags & VDEV_NAME_PATH)) { 4469 path = zfs_strip_path(path); 4470 } 4471 4472 /* 4473 * Remove the partition from the path if this is a whole disk. 
4474 */ 4475 if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 && 4476 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value) 4477 == 0 && value && !(name_flags & VDEV_NAME_PATH)) { 4478 return (zfs_strip_partition(path)); 4479 } 4480 } else { 4481 path = type; 4482 4483 /* 4484 * If it's a raidz device, we need to stick in the parity level. 4485 */ 4486 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 4487 value = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY); 4488 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 4489 (u_longlong_t)value); 4490 path = buf; 4491 } 4492 4493 /* 4494 * If it's a dRAID device, we add parity, groups, and spares. 4495 */ 4496 if (strcmp(path, VDEV_TYPE_DRAID) == 0) { 4497 uint64_t ndata, nparity, nspares; 4498 nvlist_t **child; 4499 uint_t children; 4500 4501 verify(nvlist_lookup_nvlist_array(nv, 4502 ZPOOL_CONFIG_CHILDREN, &child, &children) == 0); 4503 nparity = fnvlist_lookup_uint64(nv, 4504 ZPOOL_CONFIG_NPARITY); 4505 ndata = fnvlist_lookup_uint64(nv, 4506 ZPOOL_CONFIG_DRAID_NDATA); 4507 nspares = fnvlist_lookup_uint64(nv, 4508 ZPOOL_CONFIG_DRAID_NSPARES); 4509 4510 path = zpool_draid_name(buf, sizeof (buf), ndata, 4511 nparity, nspares, children); 4512 } 4513 4514 /* 4515 * We identify each top-level vdev by using a <type-id> 4516 * naming convention. 
4517 */ 4518 if (name_flags & VDEV_NAME_TYPE_ID) { 4519 uint64_t id = fnvlist_lookup_uint64(nv, 4520 ZPOOL_CONFIG_ID); 4521 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu", 4522 path, (u_longlong_t)id); 4523 path = tmpbuf; 4524 } 4525 } 4526 4527 return (zfs_strdup(hdl, path)); 4528 } 4529 4530 static int 4531 zbookmark_mem_compare(const void *a, const void *b) 4532 { 4533 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 4534 } 4535 4536 void 4537 zpool_add_propname(zpool_handle_t *zhp, const char *propname) 4538 { 4539 assert(zhp->zpool_n_propnames < ZHP_MAX_PROPNAMES); 4540 zhp->zpool_propnames[zhp->zpool_n_propnames] = propname; 4541 zhp->zpool_n_propnames++; 4542 } 4543 4544 /* 4545 * Retrieve the persistent error log, uniquify the members, and return to the 4546 * caller. 4547 */ 4548 int 4549 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 4550 { 4551 zfs_cmd_t zc = {"\0"}; 4552 libzfs_handle_t *hdl = zhp->zpool_hdl; 4553 zbookmark_phys_t *buf; 4554 uint64_t buflen = 10000; /* approx. 1MB of RAM */ 4555 4556 if (fnvlist_lookup_uint64(zhp->zpool_config, 4557 ZPOOL_CONFIG_ERRCOUNT) == 0) 4558 return (0); 4559 4560 /* 4561 * Retrieve the raw error list from the kernel. If it doesn't fit, 4562 * allocate a larger buffer and retry. 4563 */ 4564 (void) strcpy(zc.zc_name, zhp->zpool_name); 4565 for (;;) { 4566 buf = zfs_alloc(zhp->zpool_hdl, 4567 buflen * sizeof (zbookmark_phys_t)); 4568 zc.zc_nvlist_dst = (uintptr_t)buf; 4569 zc.zc_nvlist_dst_size = buflen; 4570 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG, 4571 &zc) != 0) { 4572 free(buf); 4573 if (errno == ENOMEM) { 4574 buflen *= 2; 4575 } else { 4576 return (zpool_standard_error_fmt(hdl, errno, 4577 dgettext(TEXT_DOMAIN, "errors: List of " 4578 "errors unavailable"))); 4579 } 4580 } else { 4581 break; 4582 } 4583 } 4584 4585 /* 4586 * Sort the resulting bookmarks. This is a little confusing due to the 4587 * implementation of ZFS_IOC_ERROR_LOG. 
The bookmarks are copied last 4588 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks 4589 * _not_ copied as part of the process. So we point the start of our 4590 * array appropriate and decrement the total number of elements. 4591 */ 4592 zbookmark_phys_t *zb = buf + zc.zc_nvlist_dst_size; 4593 uint64_t zblen = buflen - zc.zc_nvlist_dst_size; 4594 4595 qsort(zb, zblen, sizeof (zbookmark_phys_t), zbookmark_mem_compare); 4596 4597 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 4598 4599 /* 4600 * Fill in the nverrlistp with nvlist's of dataset and object numbers. 4601 */ 4602 for (uint64_t i = 0; i < zblen; i++) { 4603 nvlist_t *nv; 4604 4605 /* ignoring zb_blkid and zb_level for now */ 4606 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 4607 zb[i-1].zb_object == zb[i].zb_object) 4608 continue; 4609 4610 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 4611 goto nomem; 4612 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 4613 zb[i].zb_objset) != 0) { 4614 nvlist_free(nv); 4615 goto nomem; 4616 } 4617 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 4618 zb[i].zb_object) != 0) { 4619 nvlist_free(nv); 4620 goto nomem; 4621 } 4622 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 4623 nvlist_free(nv); 4624 goto nomem; 4625 } 4626 nvlist_free(nv); 4627 } 4628 4629 free(buf); 4630 return (0); 4631 4632 nomem: 4633 free(buf); 4634 return (no_memory(zhp->zpool_hdl)); 4635 } 4636 4637 /* 4638 * Upgrade a ZFS pool to the latest on-disk version. 
4639 */ 4640 int 4641 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 4642 { 4643 zfs_cmd_t zc = {"\0"}; 4644 libzfs_handle_t *hdl = zhp->zpool_hdl; 4645 4646 (void) strcpy(zc.zc_name, zhp->zpool_name); 4647 zc.zc_cookie = new_version; 4648 4649 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 4650 return (zpool_standard_error_fmt(hdl, errno, 4651 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 4652 zhp->zpool_name)); 4653 return (0); 4654 } 4655 4656 void 4657 zfs_save_arguments(int argc, char **argv, char *string, int len) 4658 { 4659 int i; 4660 4661 (void) strlcpy(string, zfs_basename(argv[0]), len); 4662 for (i = 1; i < argc; i++) { 4663 (void) strlcat(string, " ", len); 4664 (void) strlcat(string, argv[i], len); 4665 } 4666 } 4667 4668 int 4669 zpool_log_history(libzfs_handle_t *hdl, const char *message) 4670 { 4671 zfs_cmd_t zc = {"\0"}; 4672 nvlist_t *args; 4673 4674 args = fnvlist_alloc(); 4675 fnvlist_add_string(args, "message", message); 4676 zcmd_write_src_nvlist(hdl, &zc, args); 4677 int err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc); 4678 nvlist_free(args); 4679 zcmd_free_nvlists(&zc); 4680 return (err); 4681 } 4682 4683 /* 4684 * Perform ioctl to get some command history of a pool. 4685 * 4686 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 4687 * logical offset of the history buffer to start reading from. 4688 * 4689 * Upon return, 'off' is the next logical offset to read from and 4690 * 'len' is the actual amount of bytes read into 'buf'. 
4691 */ 4692 static int 4693 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 4694 { 4695 zfs_cmd_t zc = {"\0"}; 4696 libzfs_handle_t *hdl = zhp->zpool_hdl; 4697 4698 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4699 4700 zc.zc_history = (uint64_t)(uintptr_t)buf; 4701 zc.zc_history_len = *len; 4702 zc.zc_history_offset = *off; 4703 4704 if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 4705 switch (errno) { 4706 case EPERM: 4707 return (zfs_error_fmt(hdl, EZFS_PERM, 4708 dgettext(TEXT_DOMAIN, 4709 "cannot show history for pool '%s'"), 4710 zhp->zpool_name)); 4711 case ENOENT: 4712 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 4713 dgettext(TEXT_DOMAIN, "cannot get history for pool " 4714 "'%s'"), zhp->zpool_name)); 4715 case ENOTSUP: 4716 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 4717 dgettext(TEXT_DOMAIN, "cannot get history for pool " 4718 "'%s', pool must be upgraded"), zhp->zpool_name)); 4719 default: 4720 return (zpool_standard_error_fmt(hdl, errno, 4721 dgettext(TEXT_DOMAIN, 4722 "cannot get history for '%s'"), zhp->zpool_name)); 4723 } 4724 } 4725 4726 *len = zc.zc_history_len; 4727 *off = zc.zc_history_offset; 4728 4729 return (0); 4730 } 4731 4732 /* 4733 * Retrieve the command history of a pool. 
4734 */ 4735 int 4736 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off, 4737 boolean_t *eof) 4738 { 4739 libzfs_handle_t *hdl = zhp->zpool_hdl; 4740 char *buf; 4741 int buflen = 128 * 1024; 4742 nvlist_t **records = NULL; 4743 uint_t numrecords = 0; 4744 int err = 0, i; 4745 uint64_t start = *off; 4746 4747 buf = zfs_alloc(hdl, buflen); 4748 4749 /* process about 1MiB a time */ 4750 while (*off - start < 1024 * 1024) { 4751 uint64_t bytes_read = buflen; 4752 uint64_t leftover; 4753 4754 if ((err = get_history(zhp, buf, off, &bytes_read)) != 0) 4755 break; 4756 4757 /* if nothing else was read in, we're at EOF, just return */ 4758 if (!bytes_read) { 4759 *eof = B_TRUE; 4760 break; 4761 } 4762 4763 if ((err = zpool_history_unpack(buf, bytes_read, 4764 &leftover, &records, &numrecords)) != 0) { 4765 zpool_standard_error_fmt(hdl, err, 4766 dgettext(TEXT_DOMAIN, 4767 "cannot get history for '%s'"), zhp->zpool_name); 4768 break; 4769 } 4770 *off -= leftover; 4771 if (leftover == bytes_read) { 4772 /* 4773 * no progress made, because buffer is not big enough 4774 * to hold this record; resize and retry. 4775 */ 4776 buflen *= 2; 4777 free(buf); 4778 buf = zfs_alloc(hdl, buflen); 4779 } 4780 } 4781 4782 free(buf); 4783 4784 if (!err) { 4785 *nvhisp = fnvlist_alloc(); 4786 fnvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 4787 (const nvlist_t **)records, numrecords); 4788 } 4789 for (i = 0; i < numrecords; i++) 4790 nvlist_free(records[i]); 4791 free(records); 4792 4793 return (err); 4794 } 4795 4796 /* 4797 * Retrieve the next event given the passed 'zevent_fd' file descriptor. 4798 * If there is a new event available 'nvp' will contain a newly allocated 4799 * nvlist and 'dropped' will be set to the number of missed events since 4800 * the last call to this function. When 'nvp' is set to NULL it indicates 4801 * no new events are available. In either case the function returns 0 and 4802 * it is up to the caller to free 'nvp'. 
In the case of a fatal error the 4803 * function will return a non-zero value. When the function is called in 4804 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed), 4805 * it will not return until a new event is available. 4806 */ 4807 int 4808 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp, 4809 int *dropped, unsigned flags, int zevent_fd) 4810 { 4811 zfs_cmd_t zc = {"\0"}; 4812 int error = 0; 4813 4814 *nvp = NULL; 4815 *dropped = 0; 4816 zc.zc_cleanup_fd = zevent_fd; 4817 4818 if (flags & ZEVENT_NONBLOCK) 4819 zc.zc_guid = ZEVENT_NONBLOCK; 4820 4821 zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE); 4822 4823 retry: 4824 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) { 4825 switch (errno) { 4826 case ESHUTDOWN: 4827 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 4828 dgettext(TEXT_DOMAIN, "zfs shutdown")); 4829 goto out; 4830 case ENOENT: 4831 /* Blocking error case should not occur */ 4832 if (!(flags & ZEVENT_NONBLOCK)) 4833 error = zpool_standard_error_fmt(hdl, errno, 4834 dgettext(TEXT_DOMAIN, "cannot get event")); 4835 4836 goto out; 4837 case ENOMEM: 4838 zcmd_expand_dst_nvlist(hdl, &zc); 4839 goto retry; 4840 default: 4841 error = zpool_standard_error_fmt(hdl, errno, 4842 dgettext(TEXT_DOMAIN, "cannot get event")); 4843 goto out; 4844 } 4845 } 4846 4847 error = zcmd_read_dst_nvlist(hdl, &zc, nvp); 4848 if (error != 0) 4849 goto out; 4850 4851 *dropped = (int)zc.zc_cookie; 4852 out: 4853 zcmd_free_nvlists(&zc); 4854 4855 return (error); 4856 } 4857 4858 /* 4859 * Clear all events. 
4860 */ 4861 int 4862 zpool_events_clear(libzfs_handle_t *hdl, int *count) 4863 { 4864 zfs_cmd_t zc = {"\0"}; 4865 4866 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0) 4867 return (zpool_standard_error(hdl, errno, 4868 dgettext(TEXT_DOMAIN, "cannot clear events"))); 4869 4870 if (count != NULL) 4871 *count = (int)zc.zc_cookie; /* # of events cleared */ 4872 4873 return (0); 4874 } 4875 4876 /* 4877 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for 4878 * the passed zevent_fd file handle. On success zero is returned, 4879 * otherwise -1 is returned and hdl->libzfs_error is set to the errno. 4880 */ 4881 int 4882 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd) 4883 { 4884 zfs_cmd_t zc = {"\0"}; 4885 int error = 0; 4886 4887 zc.zc_guid = eid; 4888 zc.zc_cleanup_fd = zevent_fd; 4889 4890 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) { 4891 switch (errno) { 4892 case ENOENT: 4893 error = zfs_error_fmt(hdl, EZFS_NOENT, 4894 dgettext(TEXT_DOMAIN, "cannot get event")); 4895 break; 4896 4897 case ENOMEM: 4898 error = zfs_error_fmt(hdl, EZFS_NOMEM, 4899 dgettext(TEXT_DOMAIN, "cannot get event")); 4900 break; 4901 4902 default: 4903 error = zpool_standard_error_fmt(hdl, errno, 4904 dgettext(TEXT_DOMAIN, "cannot get event")); 4905 break; 4906 } 4907 } 4908 4909 return (error); 4910 } 4911 4912 static void 4913 zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 4914 char *pathname, size_t len, boolean_t always_unmounted) 4915 { 4916 zfs_cmd_t zc = {"\0"}; 4917 boolean_t mounted = B_FALSE; 4918 char *mntpnt = NULL; 4919 char dsname[ZFS_MAX_DATASET_NAME_LEN]; 4920 4921 if (dsobj == 0) { 4922 /* special case for the MOS */ 4923 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", 4924 (longlong_t)obj); 4925 return; 4926 } 4927 4928 /* get the dataset's name */ 4929 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4930 zc.zc_obj = dsobj; 4931 if (zfs_ioctl(zhp->zpool_hdl, 4932 
ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 4933 /* just write out a path of two object numbers */ 4934 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 4935 (longlong_t)dsobj, (longlong_t)obj); 4936 return; 4937 } 4938 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 4939 4940 /* find out if the dataset is mounted */ 4941 mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname, 4942 &mntpnt); 4943 4944 /* get the corrupted object's path */ 4945 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 4946 zc.zc_obj = obj; 4947 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH, 4948 &zc) == 0) { 4949 if (mounted) { 4950 (void) snprintf(pathname, len, "%s%s", mntpnt, 4951 zc.zc_value); 4952 } else { 4953 (void) snprintf(pathname, len, "%s:%s", 4954 dsname, zc.zc_value); 4955 } 4956 } else { 4957 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, 4958 (longlong_t)obj); 4959 } 4960 free(mntpnt); 4961 } 4962 4963 void 4964 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 4965 char *pathname, size_t len) 4966 { 4967 zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE); 4968 } 4969 4970 void 4971 zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 4972 char *pathname, size_t len) 4973 { 4974 zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE); 4975 } 4976 /* 4977 * Wait while the specified activity is in progress in the pool. 4978 */ 4979 int 4980 zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity) 4981 { 4982 boolean_t missing; 4983 4984 int error = zpool_wait_status(zhp, activity, &missing, NULL); 4985 4986 if (missing) { 4987 (void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT, 4988 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"), 4989 zhp->zpool_name); 4990 return (ENOENT); 4991 } else { 4992 return (error); 4993 } 4994 } 4995 4996 /* 4997 * Wait for the given activity and return the status of the wait (whether or not 4998 * any waiting was done) in the 'waited' parameter. 
Non-existent pools are 4999 * reported via the 'missing' parameter, rather than by printing an error 5000 * message. This is convenient when this function is called in a loop over a 5001 * long period of time (as it is, for example, by zpool's wait cmd). In that 5002 * scenario, a pool being exported or destroyed should be considered a normal 5003 * event, so we don't want to print an error when we find that the pool doesn't 5004 * exist. 5005 */ 5006 int 5007 zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity, 5008 boolean_t *missing, boolean_t *waited) 5009 { 5010 int error = lzc_wait(zhp->zpool_name, activity, waited); 5011 *missing = (error == ENOENT); 5012 if (*missing) 5013 return (0); 5014 5015 if (error != 0) { 5016 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error, 5017 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"), 5018 zhp->zpool_name); 5019 } 5020 5021 return (error); 5022 } 5023 5024 int 5025 zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap) 5026 { 5027 int error = lzc_set_bootenv(zhp->zpool_name, envmap); 5028 if (error != 0) { 5029 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error, 5030 dgettext(TEXT_DOMAIN, 5031 "error setting bootenv in pool '%s'"), zhp->zpool_name); 5032 } 5033 5034 return (error); 5035 } 5036 5037 int 5038 zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp) 5039 { 5040 nvlist_t *nvl; 5041 int error; 5042 5043 nvl = NULL; 5044 error = lzc_get_bootenv(zhp->zpool_name, &nvl); 5045 if (error != 0) { 5046 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error, 5047 dgettext(TEXT_DOMAIN, 5048 "error getting bootenv in pool '%s'"), zhp->zpool_name); 5049 } else { 5050 *nvlp = nvl; 5051 } 5052 5053 return (error); 5054 } 5055 5056 /* 5057 * Attempt to read and parse feature file(s) (from "compatibility" property). 5058 * Files contain zpool feature names, comma or whitespace-separated. 5059 * Comments (# character to next newline) are discarded. 
5060 * 5061 * Arguments: 5062 * compatibility : string containing feature filenames 5063 * features : either NULL or pointer to array of boolean 5064 * report : either NULL or pointer to string buffer 5065 * rlen : length of "report" buffer 5066 * 5067 * compatibility is NULL (unset), "", "off", "legacy", or list of 5068 * comma-separated filenames. filenames should either be absolute, 5069 * or relative to: 5070 * 1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or 5071 * 2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d). 5072 * (Unset), "" or "off" => enable all features 5073 * "legacy" => disable all features 5074 * 5075 * Any feature names read from files which match unames in spa_feature_table 5076 * will have the corresponding boolean set in the features array (if non-NULL). 5077 * If more than one feature set specified, only features present in *all* of 5078 * them will be set. 5079 * 5080 * "report" if not NULL will be populated with a suitable status message. 5081 * 5082 * Return values: 5083 * ZPOOL_COMPATIBILITY_OK : files read and parsed ok 5084 * ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file 5085 * ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name 5086 * ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name 5087 * ZPOOL_COMPATIBILITY_NOFILES : no feature files found 5088 */ 5089 zpool_compat_status_t 5090 zpool_load_compat(const char *compat, boolean_t *features, char *report, 5091 size_t rlen) 5092 { 5093 int sdirfd, ddirfd, featfd; 5094 struct stat fs; 5095 char *fc; 5096 char *ps, *ls, *ws; 5097 char *file, *line, *word; 5098 5099 char l_compat[ZFS_MAXPROPLEN]; 5100 5101 boolean_t ret_nofiles = B_TRUE; 5102 boolean_t ret_badfile = B_FALSE; 5103 boolean_t ret_badtoken = B_FALSE; 5104 boolean_t ret_warntoken = B_FALSE; 5105 5106 /* special cases (unset), "" and "off" => enable all features */ 5107 if (compat == NULL || compat[0] == '\0' || 5108 strcmp(compat, ZPOOL_COMPAT_OFF) 
== 0) { 5109 if (features != NULL) 5110 for (uint_t i = 0; i < SPA_FEATURES; i++) 5111 features[i] = B_TRUE; 5112 if (report != NULL) 5113 strlcpy(report, gettext("all features enabled"), rlen); 5114 return (ZPOOL_COMPATIBILITY_OK); 5115 } 5116 5117 /* Final special case "legacy" => disable all features */ 5118 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) { 5119 if (features != NULL) 5120 for (uint_t i = 0; i < SPA_FEATURES; i++) 5121 features[i] = B_FALSE; 5122 if (report != NULL) 5123 strlcpy(report, gettext("all features disabled"), rlen); 5124 return (ZPOOL_COMPATIBILITY_OK); 5125 } 5126 5127 /* 5128 * Start with all true; will be ANDed with results from each file 5129 */ 5130 if (features != NULL) 5131 for (uint_t i = 0; i < SPA_FEATURES; i++) 5132 features[i] = B_TRUE; 5133 5134 char err_badfile[ZFS_MAXPROPLEN] = ""; 5135 char err_badtoken[ZFS_MAXPROPLEN] = ""; 5136 5137 /* 5138 * We ignore errors from the directory open() 5139 * as they're only needed if the filename is relative 5140 * which will be checked during the openat(). 5141 */ 5142 5143 /* O_PATH safer than O_RDONLY if system allows it */ 5144 #if defined(O_PATH) 5145 #define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH) 5146 #else 5147 #define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY) 5148 #endif 5149 5150 sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS); 5151 ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS); 5152 5153 (void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN); 5154 5155 for (file = strtok_r(l_compat, ",", &ps); 5156 file != NULL; 5157 file = strtok_r(NULL, ",", &ps)) { 5158 5159 boolean_t l_features[SPA_FEATURES]; 5160 5161 enum { Z_SYSCONF, Z_DATA } source; 5162 5163 /* try sysconfdir first, then datadir */ 5164 source = Z_SYSCONF; 5165 if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) { 5166 featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC); 5167 source = Z_DATA; 5168 } 5169 5170 /* File readable and correct size? 
*/ 5171 if (featfd < 0 || 5172 fstat(featfd, &fs) < 0 || 5173 fs.st_size < 1 || 5174 fs.st_size > ZPOOL_COMPAT_MAXSIZE) { 5175 (void) close(featfd); 5176 strlcat(err_badfile, file, ZFS_MAXPROPLEN); 5177 strlcat(err_badfile, " ", ZFS_MAXPROPLEN); 5178 ret_badfile = B_TRUE; 5179 continue; 5180 } 5181 5182 /* Prefault the file if system allows */ 5183 #if defined(MAP_POPULATE) 5184 #define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE) 5185 #elif defined(MAP_PREFAULT_READ) 5186 #define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ) 5187 #else 5188 #define ZC_MMAP_FLAGS (MAP_PRIVATE) 5189 #endif 5190 5191 /* private mmap() so we can strtok safely */ 5192 fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE, 5193 ZC_MMAP_FLAGS, featfd, 0); 5194 (void) close(featfd); 5195 5196 /* map ok, and last character == newline? */ 5197 if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') { 5198 (void) munmap((void *) fc, fs.st_size); 5199 strlcat(err_badfile, file, ZFS_MAXPROPLEN); 5200 strlcat(err_badfile, " ", ZFS_MAXPROPLEN); 5201 ret_badfile = B_TRUE; 5202 continue; 5203 } 5204 5205 ret_nofiles = B_FALSE; 5206 5207 for (uint_t i = 0; i < SPA_FEATURES; i++) 5208 l_features[i] = B_FALSE; 5209 5210 /* replace final newline with NULL to ensure string ends */ 5211 fc[fs.st_size - 1] = '\0'; 5212 5213 for (line = strtok_r(fc, "\n", &ls); 5214 line != NULL; 5215 line = strtok_r(NULL, "\n", &ls)) { 5216 /* discard comments */ 5217 char *r = strchr(line, '#'); 5218 if (r != NULL) 5219 *r = '\0'; 5220 5221 for (word = strtok_r(line, ", \t", &ws); 5222 word != NULL; 5223 word = strtok_r(NULL, ", \t", &ws)) { 5224 /* Find matching feature name */ 5225 uint_t f; 5226 for (f = 0; f < SPA_FEATURES; f++) { 5227 zfeature_info_t *fi = 5228 &spa_feature_table[f]; 5229 if (strcmp(word, fi->fi_uname) == 0) { 5230 l_features[f] = B_TRUE; 5231 break; 5232 } 5233 } 5234 if (f < SPA_FEATURES) 5235 continue; 5236 5237 /* found an unrecognized word */ 5238 /* lightly sanitize it */ 5239 if 
(strlen(word) > 32) 5240 word[32] = '\0'; 5241 for (char *c = word; *c != '\0'; c++) 5242 if (!isprint(*c)) 5243 *c = '?'; 5244 5245 strlcat(err_badtoken, word, ZFS_MAXPROPLEN); 5246 strlcat(err_badtoken, " ", ZFS_MAXPROPLEN); 5247 if (source == Z_SYSCONF) 5248 ret_badtoken = B_TRUE; 5249 else 5250 ret_warntoken = B_TRUE; 5251 } 5252 } 5253 (void) munmap((void *) fc, fs.st_size); 5254 5255 if (features != NULL) 5256 for (uint_t i = 0; i < SPA_FEATURES; i++) 5257 features[i] &= l_features[i]; 5258 } 5259 (void) close(sdirfd); 5260 (void) close(ddirfd); 5261 5262 /* Return the most serious error */ 5263 if (ret_badfile) { 5264 if (report != NULL) 5265 snprintf(report, rlen, gettext("could not read/" 5266 "parse feature file(s): %s"), err_badfile); 5267 return (ZPOOL_COMPATIBILITY_BADFILE); 5268 } 5269 if (ret_nofiles) { 5270 if (report != NULL) 5271 strlcpy(report, 5272 gettext("no valid compatibility files specified"), 5273 rlen); 5274 return (ZPOOL_COMPATIBILITY_NOFILES); 5275 } 5276 if (ret_badtoken) { 5277 if (report != NULL) 5278 snprintf(report, rlen, gettext("invalid feature " 5279 "name(s) in local compatibility files: %s"), 5280 err_badtoken); 5281 return (ZPOOL_COMPATIBILITY_BADTOKEN); 5282 } 5283 if (ret_warntoken) { 5284 if (report != NULL) 5285 snprintf(report, rlen, gettext("unrecognized feature " 5286 "name(s) in distribution compatibility files: %s"), 5287 err_badtoken); 5288 return (ZPOOL_COMPATIBILITY_WARNTOKEN); 5289 } 5290 if (report != NULL) 5291 strlcpy(report, gettext("compatibility set ok"), rlen); 5292 return (ZPOOL_COMPATIBILITY_OK); 5293 } 5294 5295 static int 5296 zpool_vdev_guid(zpool_handle_t *zhp, const char *vdevname, uint64_t *vdev_guid) 5297 { 5298 nvlist_t *tgt; 5299 boolean_t avail_spare, l2cache; 5300 5301 verify(zhp != NULL); 5302 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 5303 char errbuf[ERRBUFLEN]; 5304 (void) snprintf(errbuf, sizeof (errbuf), 5305 dgettext(TEXT_DOMAIN, "pool is in an unavailable state")); 5306 return 
(zfs_error(zhp->zpool_hdl, EZFS_POOLUNAVAIL, errbuf)); 5307 } 5308 5309 if ((tgt = zpool_find_vdev(zhp, vdevname, &avail_spare, &l2cache, 5310 NULL)) == NULL) { 5311 char errbuf[ERRBUFLEN]; 5312 (void) snprintf(errbuf, sizeof (errbuf), 5313 dgettext(TEXT_DOMAIN, "can not find %s in %s"), 5314 vdevname, zhp->zpool_name); 5315 return (zfs_error(zhp->zpool_hdl, EZFS_NODEVICE, errbuf)); 5316 } 5317 5318 *vdev_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 5319 return (0); 5320 } 5321 5322 /* 5323 * Get a vdev property value for 'prop' and return the value in 5324 * a pre-allocated buffer. 5325 */ 5326 int 5327 zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name, 5328 char *buf, size_t len, zprop_source_t *srctype, boolean_t literal) 5329 { 5330 nvlist_t *nv; 5331 const char *strval; 5332 uint64_t intval; 5333 zprop_source_t src = ZPROP_SRC_NONE; 5334 5335 if (prop == VDEV_PROP_USERPROP) { 5336 /* user property, prop_name must contain the property name */ 5337 assert(prop_name != NULL); 5338 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) { 5339 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE); 5340 strval = fnvlist_lookup_string(nv, ZPROP_VALUE); 5341 } else { 5342 /* user prop not found */ 5343 return (-1); 5344 } 5345 (void) strlcpy(buf, strval, len); 5346 if (srctype) 5347 *srctype = src; 5348 return (0); 5349 } 5350 5351 if (prop_name == NULL) 5352 prop_name = (char *)vdev_prop_to_name(prop); 5353 5354 switch (vdev_prop_get_type(prop)) { 5355 case PROP_TYPE_STRING: 5356 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) { 5357 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE); 5358 strval = fnvlist_lookup_string(nv, ZPROP_VALUE); 5359 } else { 5360 src = ZPROP_SRC_DEFAULT; 5361 if ((strval = vdev_prop_default_string(prop)) == NULL) 5362 strval = "-"; 5363 } 5364 (void) strlcpy(buf, strval, len); 5365 break; 5366 5367 case PROP_TYPE_NUMBER: 5368 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) { 5369 src = 
fnvlist_lookup_uint64(nv, ZPROP_SOURCE); 5370 intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE); 5371 } else { 5372 src = ZPROP_SRC_DEFAULT; 5373 intval = vdev_prop_default_numeric(prop); 5374 } 5375 5376 switch (prop) { 5377 case VDEV_PROP_ASIZE: 5378 case VDEV_PROP_PSIZE: 5379 case VDEV_PROP_SIZE: 5380 case VDEV_PROP_BOOTSIZE: 5381 case VDEV_PROP_ALLOCATED: 5382 case VDEV_PROP_FREE: 5383 case VDEV_PROP_READ_ERRORS: 5384 case VDEV_PROP_WRITE_ERRORS: 5385 case VDEV_PROP_CHECKSUM_ERRORS: 5386 case VDEV_PROP_INITIALIZE_ERRORS: 5387 case VDEV_PROP_TRIM_ERRORS: 5388 case VDEV_PROP_SLOW_IOS: 5389 case VDEV_PROP_OPS_NULL: 5390 case VDEV_PROP_OPS_READ: 5391 case VDEV_PROP_OPS_WRITE: 5392 case VDEV_PROP_OPS_FREE: 5393 case VDEV_PROP_OPS_CLAIM: 5394 case VDEV_PROP_OPS_TRIM: 5395 case VDEV_PROP_BYTES_NULL: 5396 case VDEV_PROP_BYTES_READ: 5397 case VDEV_PROP_BYTES_WRITE: 5398 case VDEV_PROP_BYTES_FREE: 5399 case VDEV_PROP_BYTES_CLAIM: 5400 case VDEV_PROP_BYTES_TRIM: 5401 if (literal) { 5402 (void) snprintf(buf, len, "%llu", 5403 (u_longlong_t)intval); 5404 } else { 5405 (void) zfs_nicenum(intval, buf, len); 5406 } 5407 break; 5408 case VDEV_PROP_EXPANDSZ: 5409 if (intval == 0) { 5410 (void) strlcpy(buf, "-", len); 5411 } else if (literal) { 5412 (void) snprintf(buf, len, "%llu", 5413 (u_longlong_t)intval); 5414 } else { 5415 (void) zfs_nicenum(intval, buf, len); 5416 } 5417 break; 5418 case VDEV_PROP_CAPACITY: 5419 if (literal) { 5420 (void) snprintf(buf, len, "%llu", 5421 (u_longlong_t)intval); 5422 } else { 5423 (void) snprintf(buf, len, "%llu%%", 5424 (u_longlong_t)intval); 5425 } 5426 break; 5427 case VDEV_PROP_CHECKSUM_N: 5428 case VDEV_PROP_CHECKSUM_T: 5429 case VDEV_PROP_IO_N: 5430 case VDEV_PROP_IO_T: 5431 case VDEV_PROP_SLOW_IO_N: 5432 case VDEV_PROP_SLOW_IO_T: 5433 if (intval == UINT64_MAX) { 5434 (void) strlcpy(buf, "-", len); 5435 } else { 5436 (void) snprintf(buf, len, "%llu", 5437 (u_longlong_t)intval); 5438 } 5439 break; 5440 case VDEV_PROP_FRAGMENTATION: 5441 if 
(intval == UINT64_MAX) { 5442 (void) strlcpy(buf, "-", len); 5443 } else { 5444 (void) snprintf(buf, len, "%llu%%", 5445 (u_longlong_t)intval); 5446 } 5447 break; 5448 case VDEV_PROP_STATE: 5449 if (literal) { 5450 (void) snprintf(buf, len, "%llu", 5451 (u_longlong_t)intval); 5452 } else { 5453 (void) strlcpy(buf, zpool_state_to_name(intval, 5454 VDEV_AUX_NONE), len); 5455 } 5456 break; 5457 default: 5458 (void) snprintf(buf, len, "%llu", 5459 (u_longlong_t)intval); 5460 } 5461 break; 5462 5463 case PROP_TYPE_INDEX: 5464 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) { 5465 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE); 5466 intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE); 5467 } else { 5468 /* 'trim_support' only valid for leaf vdevs */ 5469 if (prop == VDEV_PROP_TRIM_SUPPORT) { 5470 (void) strlcpy(buf, "-", len); 5471 break; 5472 } 5473 src = ZPROP_SRC_DEFAULT; 5474 intval = vdev_prop_default_numeric(prop); 5475 /* Only use if provided by the RAIDZ VDEV above */ 5476 if (prop == VDEV_PROP_RAIDZ_EXPANDING) 5477 return (ENOENT); 5478 } 5479 if (vdev_prop_index_to_string(prop, intval, 5480 (const char **)&strval) != 0) 5481 return (-1); 5482 (void) strlcpy(buf, strval, len); 5483 break; 5484 5485 default: 5486 abort(); 5487 } 5488 5489 if (srctype) 5490 *srctype = src; 5491 5492 return (0); 5493 } 5494 5495 /* 5496 * Get a vdev property value for 'prop_name' and return the value in 5497 * a pre-allocated buffer. 
5498 */ 5499 int 5500 zpool_get_vdev_prop(zpool_handle_t *zhp, const char *vdevname, vdev_prop_t prop, 5501 char *prop_name, char *buf, size_t len, zprop_source_t *srctype, 5502 boolean_t literal) 5503 { 5504 nvlist_t *reqnvl, *reqprops; 5505 nvlist_t *retprops = NULL; 5506 uint64_t vdev_guid = 0; 5507 int ret; 5508 5509 if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0) 5510 return (ret); 5511 5512 if (nvlist_alloc(&reqnvl, NV_UNIQUE_NAME, 0) != 0) 5513 return (no_memory(zhp->zpool_hdl)); 5514 if (nvlist_alloc(&reqprops, NV_UNIQUE_NAME, 0) != 0) 5515 return (no_memory(zhp->zpool_hdl)); 5516 5517 fnvlist_add_uint64(reqnvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid); 5518 5519 if (prop != VDEV_PROP_USERPROP) { 5520 /* prop_name overrides prop value */ 5521 if (prop_name != NULL) 5522 prop = vdev_name_to_prop(prop_name); 5523 else 5524 prop_name = (char *)vdev_prop_to_name(prop); 5525 assert(prop < VDEV_NUM_PROPS); 5526 } 5527 5528 assert(prop_name != NULL); 5529 if (nvlist_add_uint64(reqprops, prop_name, prop) != 0) { 5530 nvlist_free(reqnvl); 5531 nvlist_free(reqprops); 5532 return (no_memory(zhp->zpool_hdl)); 5533 } 5534 5535 fnvlist_add_nvlist(reqnvl, ZPOOL_VDEV_PROPS_GET_PROPS, reqprops); 5536 5537 ret = lzc_get_vdev_prop(zhp->zpool_name, reqnvl, &retprops); 5538 5539 if (ret == 0) { 5540 ret = zpool_get_vdev_prop_value(retprops, prop, prop_name, buf, 5541 len, srctype, literal); 5542 } else { 5543 char errbuf[ERRBUFLEN]; 5544 (void) snprintf(errbuf, sizeof (errbuf), 5545 dgettext(TEXT_DOMAIN, "cannot get vdev property %s from" 5546 " %s in %s"), prop_name, vdevname, zhp->zpool_name); 5547 (void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf); 5548 } 5549 5550 nvlist_free(reqnvl); 5551 nvlist_free(reqprops); 5552 nvlist_free(retprops); 5553 5554 return (ret); 5555 } 5556 5557 /* 5558 * Get all vdev properties 5559 */ 5560 int 5561 zpool_get_all_vdev_props(zpool_handle_t *zhp, const char *vdevname, 5562 nvlist_t **outnvl) 5563 { 5564 nvlist_t *nvl = NULL; 
5565 uint64_t vdev_guid = 0; 5566 int ret; 5567 5568 if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0) 5569 return (ret); 5570 5571 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 5572 return (no_memory(zhp->zpool_hdl)); 5573 5574 fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid); 5575 5576 ret = lzc_get_vdev_prop(zhp->zpool_name, nvl, outnvl); 5577 5578 nvlist_free(nvl); 5579 5580 if (ret) { 5581 char errbuf[ERRBUFLEN]; 5582 (void) snprintf(errbuf, sizeof (errbuf), 5583 dgettext(TEXT_DOMAIN, "cannot get vdev properties for" 5584 " %s in %s"), vdevname, zhp->zpool_name); 5585 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 5586 } 5587 5588 return (ret); 5589 } 5590 5591 /* 5592 * Set vdev property 5593 */ 5594 int 5595 zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname, 5596 const char *propname, const char *propval) 5597 { 5598 int ret; 5599 nvlist_t *nvl = NULL; 5600 nvlist_t *outnvl = NULL; 5601 nvlist_t *props; 5602 nvlist_t *realprops; 5603 prop_flags_t flags = { 0 }; 5604 uint64_t version; 5605 uint64_t vdev_guid; 5606 5607 if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0) 5608 return (ret); 5609 5610 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 5611 return (no_memory(zhp->zpool_hdl)); 5612 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) 5613 return (no_memory(zhp->zpool_hdl)); 5614 5615 fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid); 5616 5617 if (nvlist_add_string(props, propname, propval) != 0) { 5618 nvlist_free(props); 5619 return (no_memory(zhp->zpool_hdl)); 5620 } 5621 5622 char errbuf[ERRBUFLEN]; 5623 (void) snprintf(errbuf, sizeof (errbuf), 5624 dgettext(TEXT_DOMAIN, "cannot set property %s for %s on %s"), 5625 propname, vdevname, zhp->zpool_name); 5626 5627 flags.vdevprop = 1; 5628 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 5629 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 5630 zhp->zpool_name, props, version, flags, errbuf)) == NULL) { 5631 
nvlist_free(props); 5632 nvlist_free(nvl); 5633 return (-1); 5634 } 5635 5636 nvlist_free(props); 5637 props = realprops; 5638 5639 fnvlist_add_nvlist(nvl, ZPOOL_VDEV_PROPS_SET_PROPS, props); 5640 5641 ret = lzc_set_vdev_prop(zhp->zpool_name, nvl, &outnvl); 5642 5643 nvlist_free(props); 5644 nvlist_free(nvl); 5645 nvlist_free(outnvl); 5646 5647 if (ret) 5648 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 5649 5650 return (ret); 5651 } 5652 5653 /* 5654 * Prune older entries from the DDT to reclaim space under the quota 5655 */ 5656 int 5657 zpool_ddt_prune(zpool_handle_t *zhp, zpool_ddt_prune_unit_t unit, 5658 uint64_t amount) 5659 { 5660 int error = lzc_ddt_prune(zhp->zpool_name, unit, amount); 5661 if (error != 0) { 5662 libzfs_handle_t *hdl = zhp->zpool_hdl; 5663 char errbuf[ERRBUFLEN]; 5664 5665 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 5666 "cannot prune dedup table on '%s'"), zhp->zpool_name); 5667 5668 if (error == EALREADY) { 5669 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 5670 "a prune operation is already in progress")); 5671 (void) zfs_error(hdl, EZFS_BUSY, errbuf); 5672 } else { 5673 (void) zpool_standard_error(hdl, errno, errbuf); 5674 } 5675 return (-1); 5676 } 5677 5678 return (0); 5679 } 5680