// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 * Copyright (c) 2018 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 * Copyright (c) 2021, 2023, Klara Inc.
 */

#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_sysfs.h>
#include <sys/vdev_disk.h>
#include <sys/types.h>
#include <dlfcn.h>
#include <libzutil.h>
#include <fcntl.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static boolean_t zpool_vdev_is_interior(const char *name);

typedef struct prop_flags {
	unsigned int create:1;	/* Validate property on creation */
	unsigned int import:1;	/* Validate property on import */
	unsigned int vdevprop:1; /* Validate property as a VDEV property */
} prop_flags_t;

/*
 * ====================================================================
 *	zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zhp->zpool_n_propnames > 0) {
		nvlist_t *innvl = fnvlist_alloc();
		fnvlist_add_string_array(innvl, ZPOOL_GET_PROPS_NAMES,
		    zhp->zpool_propnames, zhp->zpool_n_propnames);
		zcmd_write_src_nvlist(hdl, &zc, innvl);
		fnvlist_free(innvl);
	}

	zcmd_alloc_dst_nvlist(hdl, &zc, 0);

	while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM)
			zcmd_expand_dst_nvlist(hdl, &zc);
		else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}
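
/*
 * Illustrative sketch (not part of libzfs): properties that are only
 * computed on request, such as ZPOOL_DEDUPCACHED_PROP_NAME, must be
 * registered on the handle so the next refresh fetches them via the
 * ZPOOL_GET_PROPS_NAMES mechanism above.  Assuming 'zhp' is an open
 * pool handle, a consumer might do:
 *
 *	zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
 *	if (zpool_props_refresh(zhp) == 0) {
 *		// zhp->zpool_props now includes the requested name
 *	}
 */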

static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	const char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
		value = fnvlist_lookup_string(nv, ZPROP_VALUE);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
		value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}
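
/*
 * Illustrative sketch (not part of libzfs): mapping an open handle's
 * state to its printed label, assuming 'zhp' came from zpool_open():
 *
 *	(void) printf("%s: %s\n", zpool_get_name(zhp),
 *	    zpool_pool_state_to_name(zpool_get_state(zhp)));
 */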

/*
 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
 * "SUSPENDED", etc).
 */
const char *
zpool_get_state_str(zpool_handle_t *zhp)
{
	zpool_errata_t errata;
	zpool_status_t status;
	const char *str;

	status = zpool_get_status(zhp, NULL, &errata);

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		str = gettext("FAULTED");
	} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
	    status == ZPOOL_STATUS_IO_FAILURE_CONTINUE ||
	    status == ZPOOL_STATUS_IO_FAILURE_MMP) {
		str = gettext("SUSPENDED");
	} else {
		nvlist_t *nvroot = fnvlist_lookup_nvlist(
		    zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE);
		uint_t vsc;
		vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(
		    nvroot, ZPOOL_CONFIG_VDEV_STATS, &vsc);
		str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
	}
	return (str);
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
		case ZPOOL_PROP_COMPATIBILITY:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			zfs_fallthrough;
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	/*
	 * ZPOOL_PROP_DEDUPCACHED can be fetched by name only using
	 * the ZPOOL_GET_PROPS_NAMES mechanism
	 */
	if (prop == ZPOOL_PROP_DEDUPCACHED) {
		zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
		(void) zpool_props_refresh(zhp);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_DEDUP_TABLE_QUOTA:
			/*
			 * If dedup quota is 0, we translate this into 'none'
			 * (unless literal is set). And if it is UINT64_MAX
			 * we translate that as 'auto' (limit to the size of
			 * the dedicated dedup VDEV). Otherwise, fall through
			 * to the regular number formatting.
			 */
			if (intval == 0) {
				(void) strlcpy(buf, literal ? "0" : "none",
				    len);
				break;
			} else if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "auto", len);
				break;
			}
			zfs_fallthrough;

		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
		case ZPOOL_PROP_MAXBLOCKSIZE:
		case ZPOOL_PROP_MAXDNODESIZE:
		case ZPOOL_PROP_BCLONESAVED:
		case ZPOOL_PROP_BCLONEUSED:
		case ZPOOL_PROP_DEDUP_TABLE_SIZE:
		case ZPOOL_PROP_DEDUPCACHED:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_CHECKPOINT:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_BCLONERATIO:
		case ZPOOL_PROP_DEDUPRATIO:
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			zfs_fallthrough;
		default:
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

/*
 * Get a zpool property value for 'propname' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_userprop(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len, zprop_source_t *srctype)
{
	nvlist_t *nv;
	uint64_t ival;
	const char *value;
	zprop_source_t source = ZPROP_SRC_LOCAL;

	if (zhp->zpool_props == NULL)
		zpool_get_all_props(zhp);

	if (nvlist_lookup_nvlist(zhp->zpool_props, propname, &nv) == 0) {
		if (nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0)
			source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = "-";
	}

	if (srctype)
		*srctype = source;

	(void) strlcpy(buf, value, len);

	return (0);
}

/*
 * Check that the bootfs name carries the same pool name as the pool it
 * is being set on.  Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, const char *bootfs)
{
	int len = strlen(pool);
	if (bootfs[0] == '\0')
		return (B_TRUE);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	const char *strval;
	uint64_t intval;
	const char *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	char *parent, *slash;
	char report[1024];

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		if (flags.vdevprop && zpool_prop_vdev(propname)) {
			vdev_prop_t vprop = vdev_name_to_prop(propname);

			if (vdev_prop_readonly(vprop)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is readonly"), propname);
				(void) zfs_error(hdl, EZFS_PROPREADONLY,
				    errbuf);
				goto error;
			}

			if (zprop_parse_value(hdl, elem, vprop, ZFS_TYPE_VDEV,
			    retprops, &strval, &intval, errbuf) != 0)
				goto error;

			continue;
		} else if (flags.vdevprop && vdev_prop_user(propname)) {
			if (nvlist_add_nvpair(retprops, elem) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		} else if (flags.vdevprop) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property: '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		prop = zpool_name_to_prop(propname);
		if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "feature '%s' unsupported by kernel"),
				    fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled' or 'disabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (!flags.create &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'disabled' at creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		} else if (prop == ZPOOL_PROP_INVAL &&
		    zfs_prop_user(propname)) {
			/*
			 * This is a user property: make sure it's a
			 * string, and that it's less than ZAP_MAXNAMELEN.
			 */
			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property name '%s' is too long"),
				    propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);

			if (strlen(strval) >= ZFS_MAXPROPLEN) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property value '%s' is too long"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_string(retprops, propname,
			    strval) != 0) {
				(void) no_memory(hdl);
				goto error;
			}

			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPOOL_PROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (!flags.create && zpool_prop_setonce(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s' can only be set at "
			    "creation time"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (unsigned long long)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (intval != 0 &&
			    (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid, "
				    "only values between %" PRId32 " and %"
				    PRId32 " are allowed."),
				    propname, (unsigned long long)intval,
				    ASHIFT_MIN, ASHIFT_MAX);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the pool it is set on.
			 */
			if (!bootfs_name_valid(poolname, strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			parent = strdup(strval);
			if (parent == NULL) {
				(void) zfs_error(hdl, EZFS_NOMEM, errbuf);
				goto error;
			}
			slash = strrchr(parent, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), parent);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				free(parent);
				goto error;
			}

			*slash = '\0';

			if (parent[0] != '\0' &&
			    (stat64(parent, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    parent);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				free(parent);
				goto error;
			}
			free(parent);

			break;

		case ZPOOL_PROP_COMPATIBILITY:
			switch (zpool_load_compat(strval, NULL, report, 1024)) {
			case ZPOOL_COMPATIBILITY_OK:
			case ZPOOL_COMPATIBILITY_WARNTOKEN:
				break;
			case ZPOOL_COMPATIBILITY_BADFILE:
			case ZPOOL_COMPATIBILITY_BADTOKEN:
			case ZPOOL_COMPATIBILITY_NOFILES:
				zfs_error_aux(hdl, "%s", report);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_MULTIHOST:
			if (get_system_hostid() == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "requires a non-zero system hostid"));
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case
		    ZPOOL_PROP_DEDUPDITTO:
			printf("Note: property '%s' no longer has "
			    "any effect\n", propname);
			break;

		default:
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[ERRBUFLEN];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl);

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
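
/*
 * Illustrative sketch (not part of libzfs): setting a property is a
 * single call; validation and the ioctl happen inside.  Assuming 'zhp'
 * is an open, writable pool handle:
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zhp->zpool_hdl));
 */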

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
    zfs_type_t type, boolean_t literal)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, type) != 0)
		return (-1);

	if (type == ZFS_TYPE_VDEV)
		return (0);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		/* Handle userprops in the all properties case */
		if (zhp->zpool_props == NULL && zpool_props_refresh(zhp))
			return (-1);

		nvp = NULL;
		while ((nvp = nvlist_next_nvpair(zhp->zpool_props, nvp)) !=
		    NULL) {
			const char *propname = nvpair_name(nvp);

			if (!zfs_prop_user(propname))
				continue;

			entry = zfs_alloc(hdl, sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_USERPROP;
			entry->pl_user_prop = zfs_strdup(hdl, propname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}

		for (i = 0; i < SPA_FEATURES; i++) {
			entry = zfs_alloc(hdl, sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_USERPROP;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_USERPROP;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed && !literal)
			continue;

		if (entry->pl_prop != ZPROP_USERPROP &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, literal) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		} else if (entry->pl_prop == ZPROP_INVAL &&
		    zfs_prop_user(entry->pl_user_prop) &&
		    zpool_get_userprop(zhp, entry->pl_user_prop, buf,
		    sizeof (buf), NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

int
vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,
    zprop_list_t **plp)
{
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	const char *strval = NULL;
	int err = 0;
	nvpair_t *elem = NULL;
	nvlist_t *vprops = NULL;
	nvlist_t *propval = NULL;
	const char *propname;
	vdev_prop_t prop;
	zprop_list_t **last;

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed)
			continue;

		if (zpool_get_vdev_prop(zhp, vdevname, entry->pl_prop,
		    entry->pl_user_prop, buf, sizeof (buf), NULL,
		    B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
		if (entry->pl_prop == VDEV_PROP_NAME &&
		    strlen(vdevname) > entry->pl_width)
			entry->pl_width = strlen(vdevname);
	}

	/* Handle the all properties case */
	last = plp;
	if (*last != NULL && (*last)->pl_all == B_TRUE) {
		while (*last != NULL)
			last = &(*last)->pl_next;

		err = zpool_get_all_vdev_props(zhp, vdevname, &vprops);
		if (err != 0)
			return (err);

		while ((elem = nvlist_next_nvpair(vprops, elem)) != NULL) {
			propname = nvpair_name(elem);

			/* Skip properties that are not user defined */
			if ((prop = vdev_name_to_prop(propname)) !=
			    VDEV_PROP_USERPROP)
				continue;

			if (nvpair_value_nvlist(elem, &propval) != 0)
				continue;

			strval = fnvlist_lookup_string(propval, ZPROP_VALUE);

			entry = zfs_alloc(zhp->zpool_hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = prop;
			entry->pl_user_prop = zfs_strdup(zhp->zpool_hdl,
			    propname);
			entry->pl_width = strlen(strval);
			entry->pl_all = B_TRUE;
			*last = entry;
			last = &entry->pl_next;
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
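
/*
 * Illustrative sketch (not part of libzfs): querying a feature state by
 * its "feature@..." property name, assuming 'zhp' is an open handle:
 *
 *	char state[ZFS_MAXPROPLEN];
 *	// 'state' becomes "disabled", "enabled", or "active"
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		(void) printf("async_destroy: %s\n", state);
 */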

/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "draid", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Check if vdev list contains a special vdev
 */
static boolean_t
zpool_has_special_vdev(nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (uint_t c = 0; c < children; c++) {
			const char *bias;

			if (nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
			    strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
				return (B_TRUE);
			}
		}
	}
	return (B_FALSE);
}

/*
 * Check if vdev list contains a dRAID vdev
 */
static boolean_t
zpool_has_draid_vdev(nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (uint_t c = 0; c < children; c++) {
			const char *type;

			if (nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_TYPE, &type) == 0 &&
			    strcmp(type, VDEV_TYPE_DRAID) == 0) {
				return (B_TRUE);
			}
		}
	}
	return (B_FALSE);
}

/*
 * Output a dRAID top-level vdev name into the provided buffer.
 */
static char *
zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
    uint64_t spares, uint64_t children)
{
	snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
	    VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
	    (u_longlong_t)children, (u_longlong_t)spares);

	return (name);
}
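
/*
 * Illustrative example: for parity 1, 8 data disks, 16 children and 1
 * distributed spare, zpool_draid_name() above formats the top-level
 * vdev name as "draid1:8d:16c:1s", while the distributed spares it
 * provides are named "draid1-0-0", "draid1-0-1", ... and are recognized
 * by zpool_is_draid_spare() below.
 */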

/*
 * Return B_TRUE if the provided name is a dRAID spare name.
 */
boolean_t
zpool_is_draid_spare(const char *name)
{
	uint64_t spare_id, parity, vdev_id;

	if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
	    (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
	    (u_longlong_t *)&spare_id) == 3) {
		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	nvlist_t *hidden_args = NULL;
	uint8_t *wkeydata = NULL;
	uint_t wkeylen = 0;
	char errbuf[ERRBUFLEN];
	int ret = -1;

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));

	zcmd_write_conf_nvlist(hdl, &zc, nvroot);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, errbuf)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		const char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, B_TRUE, errbuf)) == NULL) {
			goto create_failed;
		}

		if (nvlist_exists(zc_fsprops,
		    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
		    !zpool_has_special_vdev(nvroot)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "%s property requires a special vdev"),
			    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto create_failed;
		}

		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
		    &wkeydata, &wkeylen) != 0) {
			zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
		if (wkeydata != NULL) {
			if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
				goto create_failed;

			if (nvlist_add_uint8_array(hidden_args, "wkeydata",
			    wkeydata, wkeylen) != 0)
				goto create_failed;

			if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
			    hidden_args) != 0)
				goto create_failed;
		}
	}

	if (zc_props)
		zcmd_write_src_nvlist(hdl, &zc, zc_props);

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);
		nvlist_free(hidden_args);
		if (wkeydata != NULL)
			free(wkeydata);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, errbuf));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, errbuf));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, errbuf));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, errbuf));

		case EINVAL:
			if (zpool_has_draid_vdev(nvroot) &&
			    zfeature_lookup_name("draid", NULL) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID vdevs are unsupported by the "
				    "kernel"));
				return (zfs_error(hdl, EZFS_BADDEV, errbuf));
			} else {
				return (zpool_standard_error(hdl, errno,
				    errbuf));
			}

		default:
			return (zpool_standard_error(hdl, errno, errbuf));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	nvlist_free(hidden_args);
	if (wkeydata != NULL)
		free(wkeydata);
	return (ret);
}
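
/*
 * Illustrative sketch (not part of libzfs): 'nvroot' is a vdev tree of
 * the same form found in a pool config.  A minimal single-disk root,
 * built by hand, might look like this ('/dev/sdX' is a placeholder;
 * real callers typically build the tree with a helper):
 *
 *	nvlist_t *disk = fnvlist_alloc();
 *	fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdX");
 *	nvlist_t *root = fnvlist_alloc();
 *	fnvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	fnvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    (const nvlist_t **)&disk, 1);
 *	ret = zpool_create(hdl, "tank", root, NULL, NULL);
 */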

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char errbuf[ERRBUFLEN];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Create a checkpoint in the given pool.
 */
int
zpool_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char errbuf[ERRBUFLEN];
	int error;

	error = lzc_pool_checkpoint(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot checkpoint '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, errbuf);
		return (-1);
	}

	return (0);
}

/*
 * Discard the checkpoint from the given pool.
 */
int
zpool_discard_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char errbuf[ERRBUFLEN];
	int error;

	error = lzc_pool_checkpoint_discard(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot discard checkpoint in '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, errbuf);
		return (-1);
	}

	return (0);
}
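
/*
 * Illustrative sketch (not part of libzfs): a checkpoint is taken and
 * later discarded once the admin is satisfied, assuming 'zhp' is open:
 *
 *	if (zpool_checkpoint(zhp) == 0) {
 *		// ... risky operation, e.g. a trial upgrade ...
 *		(void) zpool_discard_checkpoint(zhp);
 *	}
 */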

/*
 * Prefetch data of the given type for the given pool.
 */
int
zpool_prefetch(zpool_handle_t *zhp, zpool_prefetch_type_t type)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	int error;

	error = lzc_pool_prefetch(zhp->zpool_name, type);
	if (error != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot prefetch %s in '%s'"),
		    type == ZPOOL_PREFETCH_DDT ? "ddt" : "", zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
		return (-1);
	}

	return (0);
}
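
/*
 * Illustrative sketch (not part of libzfs): warming the dedup table
 * cache ahead of a dedup-heavy workload, assuming 'zhp' is open:
 *
 *	(void) zpool_prefetch(zhp, ZPOOL_PREFETCH_DDT);
 */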

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot, boolean_t check_ashift)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char errbuf[ERRBUFLEN];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
	}

	zcmd_write_conf_nvlist(hdl, &zc, nvroot);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_flags = check_ashift;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case EINVAL:

			if (zpool_has_draid_vdev(nvroot) &&
			    zfeature_lookup_name("draid", NULL) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID vdevs are unsupported by the "
				    "kernel"));
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid config; a pool with removing/"
				    "removed vdevs does not support adding "
				    "raidz or dRAID vdevs"));
			}

			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, errbuf);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error_fmt(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
			    zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
			    zhp->zpool_name));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    ctime_r((time_t *)&rewindto, timestr) != NULL) {
		timestr[24] = 0;
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config, char *buf, size_t size)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128], temp[1024];

	if (!hdl->libzfs_printerr)
		return;

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) snprintf(buf, size, dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    ctime_r((time_t *)&rewindto, timestr) != NULL) {
		timestr[24] = 0;
		(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "), timestr);
		(void) strlcat(buf, temp, size);
	} else {
		(void) strlcat(buf, dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"), size);
	}

	if (loss > 120) {
		(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
		(void) strlcat(buf, temp, size);
	} else if (loss > 0) {
		(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),
		    (longlong_t)loss);
		(void) strlcat(buf, temp, size);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) strlcat(buf, dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "),
			    size);
		} else {
			(void) strlcat(buf, dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "),
			    size);
		}
	}
	(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);
	(void) strlcat(buf, temp, size);

	(void) strlcat(buf, dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"), size);
	return;

no_info:
	(void) strlcat(buf, dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"), size);
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_collect_unsup_feat(nvlist_t *config, char *buf, size_t size)
{
	nvlist_t *nvinfo, *unsup_feat;
	char temp[512];

	nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
	unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);

	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		const char *desc = fnvpair_value_string(nvp);
		if (strlen(desc) > 0) {
			(void) snprintf(temp, 512, "\t%s (%s)\n",
			    nvpair_name(nvp), desc);
			(void) strlcat(buf, temp, size);
		} else {
			(void) snprintf(temp, 512, "\t%s\n", nvpair_name(nvp));
			(void) strlcat(buf, temp, size);
		}
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
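 *
 * A sketch of a hypothetical caller (the read-only import is just an
 * example property; error handling is elided):
 *
 *	static int
 *	import_readonly(libzfs_handle_t *hdl, nvlist_t *config)
 *	{
 *		nvlist_t *props = fnvlist_alloc();
 *		int err;
 *
 *		fnvlist_add_string(props,
 *		    zpool_prop_to_name(ZPOOL_PROP_READONLY), "on");
 *		err = zpool_import_props(hdl, config, NULL, props,
 *		    ZFS_IMPORT_NORMAL);
 *		fnvlist_free(props);
 *		return (err);
 *	}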
2176 */ 2177 int 2178 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 2179 nvlist_t *props, int flags) 2180 { 2181 zfs_cmd_t zc = {"\0"}; 2182 zpool_load_policy_t policy; 2183 nvlist_t *nv = NULL; 2184 nvlist_t *nvinfo = NULL; 2185 nvlist_t *missing = NULL; 2186 const char *thename; 2187 const char *origname; 2188 int ret; 2189 int error = 0; 2190 char buf[2048]; 2191 char errbuf[ERRBUFLEN]; 2192 2193 origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME); 2194 2195 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 2196 "cannot import pool '%s'"), origname); 2197 2198 if (newname != NULL) { 2199 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2200 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 2201 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 2202 newname)); 2203 thename = newname; 2204 } else { 2205 thename = origname; 2206 } 2207 2208 if (props != NULL) { 2209 uint64_t version; 2210 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2211 2212 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION); 2213 2214 if ((props = zpool_valid_proplist(hdl, origname, 2215 props, version, flags, errbuf)) == NULL) 2216 return (-1); 2217 zcmd_write_src_nvlist(hdl, &zc, props); 2218 nvlist_free(props); 2219 } 2220 2221 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 2222 2223 zc.zc_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID); 2224 2225 zcmd_write_conf_nvlist(hdl, &zc, config); 2226 zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2); 2227 2228 zc.zc_cookie = flags; 2229 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 2230 errno == ENOMEM) 2231 zcmd_expand_dst_nvlist(hdl, &zc); 2232 if (ret != 0) 2233 error = errno; 2234 2235 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 2236 2237 zcmd_free_nvlists(&zc); 2238 2239 zpool_get_load_policy(config, &policy); 2240 2241 if (error) { 2242 char desc[1024]; 2243 char aux[256]; 2244 2245 /* 2246 * Dry-run failed, but we print out what success 2247 * looks like if we found a best txg 2248 */ 2249 if (policy.zlp_rewind & ZPOOL_TRY_REWIND) { 2250 zpool_rewind_exclaim(hdl, newname ? origname : thename, 2251 B_TRUE, nv); 2252 nvlist_free(nv); 2253 return (-1); 2254 } 2255 2256 if (newname == NULL) 2257 (void) snprintf(desc, sizeof (desc), 2258 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 2259 thename); 2260 else 2261 (void) snprintf(desc, sizeof (desc), 2262 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 2263 origname, thename); 2264 2265 switch (error) { 2266 case ENOTSUP: 2267 if (nv != NULL && nvlist_lookup_nvlist(nv, 2268 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 2269 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) { 2270 (void) printf(dgettext(TEXT_DOMAIN, "This " 2271 "pool uses the following feature(s) not " 2272 "supported by this system:\n")); 2273 memset(buf, 0, 2048); 2274 zpool_collect_unsup_feat(nv, buf, 2048); 2275 (void) printf("%s", buf); 2276 if (nvlist_exists(nvinfo, 2277 ZPOOL_CONFIG_CAN_RDONLY)) { 2278 (void) printf(dgettext(TEXT_DOMAIN, 2279 "All unsupported features are only " 2280 "required for writing to the pool." 2281 "\nThe pool can be imported using " 2282 "'-o readonly=on'.\n")); 2283 } 2284 } 2285 /* 2286 * Unsupported version. 
2287 */ 2288 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 2289 break; 2290 2291 case EREMOTEIO: 2292 if (nv != NULL && nvlist_lookup_nvlist(nv, 2293 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) { 2294 const char *hostname = "<unknown>"; 2295 uint64_t hostid = 0; 2296 mmp_state_t mmp_state; 2297 2298 mmp_state = fnvlist_lookup_uint64(nvinfo, 2299 ZPOOL_CONFIG_MMP_STATE); 2300 2301 if (nvlist_exists(nvinfo, 2302 ZPOOL_CONFIG_MMP_HOSTNAME)) 2303 hostname = fnvlist_lookup_string(nvinfo, 2304 ZPOOL_CONFIG_MMP_HOSTNAME); 2305 2306 if (nvlist_exists(nvinfo, 2307 ZPOOL_CONFIG_MMP_HOSTID)) 2308 hostid = fnvlist_lookup_uint64(nvinfo, 2309 ZPOOL_CONFIG_MMP_HOSTID); 2310 2311 if (mmp_state == MMP_STATE_ACTIVE) { 2312 (void) snprintf(aux, sizeof (aux), 2313 dgettext(TEXT_DOMAIN, "pool is imp" 2314 "orted on host '%s' (hostid=%lx).\n" 2315 "Export the pool on the other " 2316 "system, then run 'zpool import'."), 2317 hostname, (unsigned long) hostid); 2318 } else if (mmp_state == MMP_STATE_NO_HOSTID) { 2319 (void) snprintf(aux, sizeof (aux), 2320 dgettext(TEXT_DOMAIN, "pool has " 2321 "the multihost property on and " 2322 "the\nsystem's hostid is not set. " 2323 "Set a unique system hostid with " 2324 "the zgenhostid(8) command.\n")); 2325 } 2326 2327 (void) zfs_error_aux(hdl, "%s", aux); 2328 } 2329 (void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc); 2330 break; 2331 2332 case EINVAL: 2333 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 2334 break; 2335 2336 case EROFS: 2337 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2338 "one or more devices is read only")); 2339 (void) zfs_error(hdl, EZFS_BADDEV, desc); 2340 break; 2341 2342 case ENXIO: 2343 if (nv && nvlist_lookup_nvlist(nv, 2344 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 2345 nvlist_lookup_nvlist(nvinfo, 2346 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 2347 (void) printf(dgettext(TEXT_DOMAIN, 2348 "The devices below are missing or " 2349 "corrupted, use '-m' to import the pool " 2350 "anyway:\n")); 2351 print_vdev_tree(hdl, NULL, missing, 2); 2352 (void) printf("\n"); 2353 } 2354 (void) zpool_standard_error(hdl, error, desc); 2355 break; 2356 2357 case EEXIST: 2358 (void) zpool_standard_error(hdl, error, desc); 2359 break; 2360 2361 case EBUSY: 2362 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2363 "one or more devices are already in use\n")); 2364 (void) zfs_error(hdl, EZFS_BADDEV, desc); 2365 break; 2366 case ENAMETOOLONG: 2367 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2368 "new name of at least one dataset is longer than " 2369 "the maximum allowable length")); 2370 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc); 2371 break; 2372 default: 2373 (void) zpool_standard_error(hdl, error, desc); 2374 memset(buf, 0, 2048); 2375 zpool_explain_recover(hdl, 2376 newname ? origname : thename, -error, nv, 2377 buf, 2048); 2378 (void) printf("\t%s", buf); 2379 break; 2380 } 2381 2382 nvlist_free(nv); 2383 ret = -1; 2384 } else { 2385 zpool_handle_t *zhp; 2386 2387 /* 2388 * This should never fail, but play it safe anyway. 2389 */ 2390 if (zpool_open_silent(hdl, thename, &zhp) != 0) 2391 ret = -1; 2392 else if (zhp != NULL) 2393 zpool_close(zhp); 2394 if (policy.zlp_rewind & 2395 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 2396 zpool_rewind_exclaim(hdl, newname ? origname : thename, 2397 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv); 2398 } 2399 nvlist_free(nv); 2400 } 2401 2402 return (ret); 2403 } 2404 2405 /* 2406 * Translate vdev names to guids. 
If a vdev_path is determined to be 2407 * unsuitable then a vd_errlist is allocated and the vdev path and errno 2408 * are added to it. 2409 */ 2410 static int 2411 zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds, 2412 nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist) 2413 { 2414 nvlist_t *errlist = NULL; 2415 int error = 0; 2416 2417 for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL; 2418 elem = nvlist_next_nvpair(vds, elem)) { 2419 boolean_t spare, cache; 2420 2421 const char *vd_path = nvpair_name(elem); 2422 nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache, 2423 NULL); 2424 2425 if ((tgt == NULL) || cache || spare) { 2426 if (errlist == NULL) { 2427 errlist = fnvlist_alloc(); 2428 error = EINVAL; 2429 } 2430 2431 uint64_t err = (tgt == NULL) ? EZFS_NODEVICE : 2432 (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE); 2433 fnvlist_add_int64(errlist, vd_path, err); 2434 continue; 2435 } 2436 2437 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 2438 fnvlist_add_uint64(vdev_guids, vd_path, guid); 2439 2440 char msg[MAXNAMELEN]; 2441 (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid); 2442 fnvlist_add_string(guids_to_paths, msg, vd_path); 2443 } 2444 2445 if (error != 0) { 2446 verify(errlist != NULL); 2447 if (vd_errlist != NULL) 2448 *vd_errlist = errlist; 2449 else 2450 fnvlist_free(errlist); 2451 } 2452 2453 return (error); 2454 } 2455 2456 static int 2457 xlate_init_err(int err) 2458 { 2459 switch (err) { 2460 case ENODEV: 2461 return (EZFS_NODEVICE); 2462 case EINVAL: 2463 case EROFS: 2464 return (EZFS_BADDEV); 2465 case EBUSY: 2466 return (EZFS_INITIALIZING); 2467 case ESRCH: 2468 return (EZFS_NO_INITIALIZE); 2469 } 2470 return (err); 2471 } 2472 2473 /* 2474 * Begin, suspend, cancel, or uninit (clear) the initialization (initializing 2475 * of all free blocks) for the given vdevs in the given pool. 
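 *
 * Illustrative sketch of a hypothetical caller (the device name is an
 * example and error handling is elided); the 'vds' nvlist simply names
 * the target vdevs, one nvpair per device, as the public
 * zpool_initialize() wrapper below expects:
 *
 *	static int
 *	start_initialize(zpool_handle_t *zhp)
 *	{
 *		nvlist_t *vds = fnvlist_alloc();
 *		int err;
 *
 *		fnvlist_add_boolean(vds, "/dev/sda");
 *		err = zpool_initialize(zhp, POOL_INITIALIZE_START, vds);
 *		fnvlist_free(vds);
 *		return (err);
 *	}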
2476 */ 2477 static int 2478 zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type, 2479 nvlist_t *vds, boolean_t wait) 2480 { 2481 int err; 2482 2483 nvlist_t *vdev_guids = fnvlist_alloc(); 2484 nvlist_t *guids_to_paths = fnvlist_alloc(); 2485 nvlist_t *vd_errlist = NULL; 2486 nvlist_t *errlist; 2487 nvpair_t *elem; 2488 2489 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids, 2490 guids_to_paths, &vd_errlist); 2491 2492 if (err != 0) { 2493 verify(vd_errlist != NULL); 2494 goto list_errors; 2495 } 2496 2497 err = lzc_initialize(zhp->zpool_name, cmd_type, 2498 vdev_guids, &errlist); 2499 2500 if (err != 0) { 2501 if (errlist != NULL && nvlist_lookup_nvlist(errlist, 2502 ZPOOL_INITIALIZE_VDEVS, &vd_errlist) == 0) { 2503 goto list_errors; 2504 } 2505 2506 if (err == EINVAL && cmd_type == POOL_INITIALIZE_UNINIT) { 2507 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 2508 "uninitialize is not supported by kernel")); 2509 } 2510 2511 (void) zpool_standard_error(zhp->zpool_hdl, err, 2512 dgettext(TEXT_DOMAIN, "operation failed")); 2513 goto out; 2514 } 2515 2516 if (wait) { 2517 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL; 2518 elem = nvlist_next_nvpair(vdev_guids, elem)) { 2519 2520 uint64_t guid = fnvpair_value_uint64(elem); 2521 2522 err = lzc_wait_tag(zhp->zpool_name, 2523 ZPOOL_WAIT_INITIALIZE, guid, NULL); 2524 if (err != 0) { 2525 (void) zpool_standard_error_fmt(zhp->zpool_hdl, 2526 err, dgettext(TEXT_DOMAIN, "error " 2527 "waiting for '%s' to initialize"), 2528 nvpair_name(elem)); 2529 2530 goto out; 2531 } 2532 } 2533 } 2534 goto out; 2535 2536 list_errors: 2537 for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL; 2538 elem = nvlist_next_nvpair(vd_errlist, elem)) { 2539 int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem)); 2540 const char *path; 2541 2542 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem), 2543 &path) != 0) 2544 path = nvpair_name(elem); 2545 2546 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error, 2547 "cannot initialize '%s'", path); 2548 } 2549 2550 out: 2551 fnvlist_free(vdev_guids); 2552 fnvlist_free(guids_to_paths); 2553 2554 if (vd_errlist != NULL) 2555 fnvlist_free(vd_errlist); 2556 2557 return (err == 0 ? 
0 : -1); 2558 } 2559 2560 int 2561 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type, 2562 nvlist_t *vds) 2563 { 2564 return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE)); 2565 } 2566 2567 int 2568 zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type, 2569 nvlist_t *vds) 2570 { 2571 return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE)); 2572 } 2573 2574 static int 2575 xlate_trim_err(int err) 2576 { 2577 switch (err) { 2578 case ENODEV: 2579 return (EZFS_NODEVICE); 2580 case EINVAL: 2581 case EROFS: 2582 return (EZFS_BADDEV); 2583 case EBUSY: 2584 return (EZFS_TRIMMING); 2585 case ESRCH: 2586 return (EZFS_NO_TRIM); 2587 case EOPNOTSUPP: 2588 return (EZFS_TRIM_NOTSUP); 2589 } 2590 return (err); 2591 } 2592 2593 static int 2594 zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids) 2595 { 2596 int err; 2597 nvpair_t *elem; 2598 2599 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL; 2600 elem = nvlist_next_nvpair(vdev_guids, elem)) { 2601 2602 uint64_t guid = fnvpair_value_uint64(elem); 2603 2604 err = lzc_wait_tag(zhp->zpool_name, 2605 ZPOOL_WAIT_TRIM, guid, NULL); 2606 if (err != 0) { 2607 (void) zpool_standard_error_fmt(zhp->zpool_hdl, 2608 err, dgettext(TEXT_DOMAIN, "error " 2609 "waiting to trim '%s'"), nvpair_name(elem)); 2610 2611 return (err); 2612 } 2613 } 2614 return (0); 2615 } 2616 2617 /* 2618 * Check errlist and report any errors, omitting ones which should be 2619 * suppressed. Returns B_TRUE if any errors were reported. 2620 */ 2621 static boolean_t 2622 check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags, 2623 nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist) 2624 { 2625 nvpair_t *elem; 2626 boolean_t reported_errs = B_FALSE; 2627 int num_vds = 0; 2628 int num_suppressed_errs = 0; 2629 2630 for (elem = nvlist_next_nvpair(vds, NULL); 2631 elem != NULL; elem = nvlist_next_nvpair(vds, elem)) { 2632 num_vds++; 2633 } 2634 2635 for (elem = nvlist_next_nvpair(errlist, NULL); 2636 elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) { 2637 int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem)); 2638 const char *path; 2639 2640 /* 2641 * If only the pool was specified, and it was not a secure 2642 * trim then suppress warnings for individual vdevs which 2643 * do not support trimming. 2644 */ 2645 if (vd_error == EZFS_TRIM_NOTSUP && 2646 trim_flags->fullpool && 2647 !trim_flags->secure) { 2648 num_suppressed_errs++; 2649 continue; 2650 } 2651 2652 reported_errs = B_TRUE; 2653 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem), 2654 &path) != 0) 2655 path = nvpair_name(elem); 2656 2657 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error, 2658 "cannot trim '%s'", path); 2659 } 2660 2661 if (num_suppressed_errs == num_vds) { 2662 (void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 2663 "no devices in pool support trim operations")); 2664 (void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP, 2665 dgettext(TEXT_DOMAIN, "cannot trim"))); 2666 reported_errs = B_TRUE; 2667 } 2668 2669 return (reported_errs); 2670 } 2671 2672 /* 2673 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for 2674 * the given vdevs in the given pool. 
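 *
 * Illustrative sketch of a hypothetical caller (the device name and
 * flag values are examples; the trimflags_t field names are assumed
 * from libzfs.h, and error handling is elided):
 *
 *	static int
 *	start_trim(zpool_handle_t *zhp)
 *	{
 *		trimflags_t flags = { .fullpool = B_FALSE,
 *		    .secure = B_FALSE, .wait = B_FALSE, .rate = 0 };
 *		nvlist_t *vds = fnvlist_alloc();
 *		int err;
 *
 *		fnvlist_add_boolean(vds, "/dev/sdb");
 *		err = zpool_trim(zhp, POOL_TRIM_START, vds, &flags);
 *		fnvlist_free(vds);
 *		return (err);
 *	}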
2675 */ 2676 int 2677 zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds, 2678 trimflags_t *trim_flags) 2679 { 2680 int err; 2681 int retval = 0; 2682 2683 nvlist_t *vdev_guids = fnvlist_alloc(); 2684 nvlist_t *guids_to_paths = fnvlist_alloc(); 2685 nvlist_t *errlist = NULL; 2686 2687 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids, 2688 guids_to_paths, &errlist); 2689 if (err != 0) { 2690 check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist); 2691 retval = -1; 2692 goto out; 2693 } 2694 2695 err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate, 2696 trim_flags->secure, vdev_guids, &errlist); 2697 if (err != 0) { 2698 nvlist_t *vd_errlist; 2699 if (errlist != NULL && nvlist_lookup_nvlist(errlist, 2700 ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) { 2701 if (check_trim_errs(zhp, trim_flags, guids_to_paths, 2702 vds, vd_errlist)) { 2703 retval = -1; 2704 goto out; 2705 } 2706 } else { 2707 char errbuf[ERRBUFLEN]; 2708 2709 (void) snprintf(errbuf, sizeof (errbuf), 2710 dgettext(TEXT_DOMAIN, "operation failed")); 2711 zpool_standard_error(zhp->zpool_hdl, err, errbuf); 2712 retval = -1; 2713 goto out; 2714 } 2715 } 2716 2717 2718 if (trim_flags->wait) 2719 retval = zpool_trim_wait(zhp, vdev_guids); 2720 2721 out: 2722 if (errlist != NULL) 2723 fnvlist_free(errlist); 2724 fnvlist_free(vdev_guids); 2725 fnvlist_free(guids_to_paths); 2726 return (retval); 2727 } 2728 2729 /* 2730 * Scan the pool. 2731 */ 2732 int 2733 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd) 2734 { 2735 char errbuf[ERRBUFLEN]; 2736 int err; 2737 libzfs_handle_t *hdl = zhp->zpool_hdl; 2738 2739 nvlist_t *args = fnvlist_alloc(); 2740 fnvlist_add_uint64(args, "scan_type", (uint64_t)func); 2741 fnvlist_add_uint64(args, "scan_command", (uint64_t)cmd); 2742 2743 err = lzc_scrub(ZFS_IOC_POOL_SCRUB, zhp->zpool_name, args, NULL); 2744 fnvlist_free(args); 2745 2746 if (err == 0) { 2747 return (0); 2748 } else if (err == ZFS_ERR_IOC_CMD_UNAVAIL) { 2749 zfs_cmd_t zc = {"\0"}; 2750 (void) strlcpy(zc.zc_name, zhp->zpool_name, 2751 sizeof (zc.zc_name)); 2752 zc.zc_cookie = func; 2753 zc.zc_flags = cmd; 2754 2755 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0) 2756 return (0); 2757 } 2758 2759 /* 2760 * An ECANCELED on a scrub means one of the following: 2761 * 1. we resumed a paused scrub. 2762 * 2. we resumed a paused error scrub. 2763 * 3. Error scrub is not run because of no error log. 2764 */ 2765 if (err == ECANCELED && (func == POOL_SCAN_SCRUB || 2766 func == POOL_SCAN_ERRORSCRUB) && cmd == POOL_SCRUB_NORMAL) 2767 return (0); 2768 /* 2769 * The following cases have been handled here: 2770 * 1. Paused a scrub/error scrub if there is none in progress. 
	 */
	if (err == ENOENT && func != POOL_SCAN_NONE && cmd ==
	    POOL_SCRUB_PAUSE) {
		return (0);
	}

	ASSERT3U(func, >=, POOL_SCAN_NONE);
	ASSERT3U(func, <, POOL_SCAN_FUNCS);

	if (func == POOL_SCAN_SCRUB || func == POOL_SCAN_ERRORSCRUB) {
		if (cmd == POOL_SCRUB_PAUSE) {
			(void) snprintf(errbuf, sizeof (errbuf),
			    dgettext(TEXT_DOMAIN, "cannot pause scrubbing %s"),
			    zhp->zpool_name);
		} else {
			assert(cmd == POOL_SCRUB_NORMAL);
			(void) snprintf(errbuf, sizeof (errbuf),
			    dgettext(TEXT_DOMAIN, "cannot scrub %s"),
			    zhp->zpool_name);
		}
	} else if (func == POOL_SCAN_RESILVER) {
		assert(cmd == POOL_SCRUB_NORMAL);
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot restart resilver on %s"), zhp->zpool_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot cancel scrubbing %s"), zhp->zpool_name);
	} else {
		assert(!"unexpected result");
	}

	/*
	 * With EBUSY, six cases are possible:
	 *
	 * Current state		Requested
	 * 1. Normal Scrub Running	Normal Scrub or Error Scrub
	 * 2. Normal Scrub Paused	Error Scrub
	 * 3. Normal Scrub Paused	Pause Normal Scrub
	 * 4. Error Scrub Running	Normal Scrub or Error Scrub
	 * 5. Error Scrub Paused	Pause Error Scrub
	 * 6. Resilvering		Anything else
	 */
	if (err == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
		    ps->pss_state == DSS_SCANNING) {
			if (ps->pss_pass_scrub_pause == 0) {
				/* handles case 1 */
				assert(cmd == POOL_SCRUB_NORMAL);
				return (zfs_error(hdl, EZFS_SCRUBBING,
				    errbuf));
			} else {
				if (func == POOL_SCAN_ERRORSCRUB) {
					/* handles case 2 */
					ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
					return (zfs_error(hdl,
					    EZFS_SCRUB_PAUSED_TO_CANCEL,
					    errbuf));
				} else {
					/* handles case 3 */
					ASSERT3U(func, ==, POOL_SCAN_SCRUB);
					ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
					return (zfs_error(hdl,
					    EZFS_SCRUB_PAUSED, errbuf));
				}
			}
		} else if (ps &&
		    ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
		    ps->pss_error_scrub_state == DSS_ERRORSCRUBBING) {
			if (ps->pss_pass_error_scrub_pause == 0) {
				/* handles case 4 */
				ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
				return (zfs_error(hdl, EZFS_ERRORSCRUBBING,
				    errbuf));
			} else {
				/* handles case 5 */
				ASSERT3U(func, ==, POOL_SCAN_ERRORSCRUB);
				ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
				return (zfs_error(hdl, EZFS_ERRORSCRUB_PAUSED,
				    errbuf));
			}
		} else {
			/* handles case 6 */
			return (zfs_error(hdl, EZFS_RESILVERING, errbuf));
		}
	} else if (err == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, errbuf));
	} else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
		return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, errbuf));
	} else {
		return (zpool_standard_error(hdl, err, errbuf));
	}
}

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare, but FALSE if it's an INUSE spare.
 *
 * If 'return_parent' is set, then return the *parent* of the vdev you're
 * searching for rather than the vdev itself.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	const char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);
	const char *tmp = NULL;
	boolean_t is_root;

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &tmp);
	if (strcmp(tmp, "root") == 0)
		is_root = B_TRUE;
	else
		is_root = B_FALSE;

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval = fnvpair_value_uint64(pair);
			uint64_t theguid = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_GUID);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		const char *srchval, *val;

		srchval = fnvpair_value_string(pair);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "-part1", or "p1". The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names, zfs_strcmp_shortname()
		 *   is used to check all possible expanded paths.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
				return (nv);

		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * draid names are presented like: draid2:4d:6c:0s
			 * We match them up to the first ':' so we can still
			 * do the parity check below, but the other params
			 * are ignored.
			 */
			if ((p = strchr(type, ':')) != NULL) {
				if (strncmp(type, VDEV_TYPE_DRAID,
				    strlen(VDEV_TYPE_DRAID)) == 0)
					*p = '\0';
			}

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(zpool_vdev_is_interior(type));

			id = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID);
			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			/*
			 * If we are looking for a raidz and a parity is
			 * specified, make sure it matches.
2993 */ 2994 int rzlen = strlen(VDEV_TYPE_RAIDZ); 2995 assert(rzlen == strlen(VDEV_TYPE_DRAID)); 2996 int typlen = strlen(type); 2997 if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 || 2998 strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) && 2999 typlen != rzlen) { 3000 uint64_t vdev_parity; 3001 int parity = *(type + rzlen) - '0'; 3002 3003 if (parity <= 0 || parity > 3 || 3004 (typlen - rzlen) != 1) { 3005 /* 3006 * Nonsense parity specified, can 3007 * never match 3008 */ 3009 free(type); 3010 return (NULL); 3011 } 3012 vdev_parity = fnvlist_lookup_uint64(nv, 3013 ZPOOL_CONFIG_NPARITY); 3014 if ((int)vdev_parity != parity) { 3015 free(type); 3016 break; 3017 } 3018 } 3019 3020 free(type); 3021 if (errno != 0) 3022 return (NULL); 3023 3024 /* 3025 * Now verify that we have the correct vdev id. 3026 */ 3027 if (vdev_id == id) 3028 return (nv); 3029 } 3030 3031 /* 3032 * Common case 3033 */ 3034 if (strcmp(srchval, val) == 0) 3035 return (nv); 3036 break; 3037 } 3038 3039 default: 3040 break; 3041 } 3042 3043 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 3044 &child, &children) != 0) 3045 return (NULL); 3046 3047 for (c = 0; c < children; c++) { 3048 if ((ret = vdev_to_nvlist_iter(child[c], search, 3049 avail_spare, l2cache, NULL, return_parent)) != NULL) { 3050 /* 3051 * The 'is_log' value is only set for the toplevel 3052 * vdev, not the leaf vdevs. So we always lookup the 3053 * log device from the root of the vdev tree (where 3054 * 'log' is non-NULL). 3055 */ 3056 if (log != NULL && 3057 nvlist_lookup_uint64(child[c], 3058 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 3059 is_log) { 3060 *log = B_TRUE; 3061 } 3062 return (ret && return_parent && !is_root ? nv : ret); 3063 } 3064 } 3065 3066 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 3067 &child, &children) == 0) { 3068 for (c = 0; c < children; c++) { 3069 if ((ret = vdev_to_nvlist_iter(child[c], search, 3070 avail_spare, l2cache, NULL, return_parent)) 3071 != NULL) { 3072 *avail_spare = B_TRUE; 3073 return (ret && return_parent && 3074 !is_root ? nv : ret); 3075 } 3076 } 3077 } 3078 3079 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 3080 &child, &children) == 0) { 3081 for (c = 0; c < children; c++) { 3082 if ((ret = vdev_to_nvlist_iter(child[c], search, 3083 avail_spare, l2cache, NULL, return_parent)) 3084 != NULL) { 3085 *l2cache = B_TRUE; 3086 return (ret && return_parent && 3087 !is_root ? nv : ret); 3088 } 3089 } 3090 } 3091 3092 return (NULL); 3093 } 3094 3095 /* 3096 * Given a physical path or guid, find the associated vdev. 3097 */ 3098 nvlist_t * 3099 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 3100 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 3101 { 3102 nvlist_t *search, *nvroot, *ret; 3103 uint64_t guid; 3104 char *end; 3105 3106 search = fnvlist_alloc(); 3107 3108 guid = strtoull(ppath, &end, 0); 3109 if (guid != 0 && *end == '\0') { 3110 fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid); 3111 } else { 3112 fnvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath); 3113 } 3114 3115 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config, 3116 ZPOOL_CONFIG_VDEV_TREE); 3117 3118 *avail_spare = B_FALSE; 3119 *l2cache = B_FALSE; 3120 if (log != NULL) 3121 *log = B_FALSE; 3122 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log, 3123 B_FALSE); 3124 fnvlist_free(search); 3125 3126 return (ret); 3127 } 3128 3129 /* 3130 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 
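 *
 * For example, "mirror-0", "raidz2-1", and "draid2:4d:6c:0s-0" are all
 * interior vdev names, while a leaf path such as "/dev/sda1" or a dRAID
 * distributed spare name such as "draid1-0-0" is not.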
3131 */ 3132 static boolean_t 3133 zpool_vdev_is_interior(const char *name) 3134 { 3135 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 3136 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 || 3137 strncmp(name, 3138 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 || 3139 strncmp(name, VDEV_TYPE_ROOT, strlen(VDEV_TYPE_ROOT)) == 0 || 3140 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 3141 return (B_TRUE); 3142 3143 if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 && 3144 !zpool_is_draid_spare(name)) 3145 return (B_TRUE); 3146 3147 return (B_FALSE); 3148 } 3149 3150 /* 3151 * Lookup the nvlist for a given vdev or vdev's parent (depending on 3152 * if 'return_parent' is set). 3153 */ 3154 static nvlist_t * 3155 __zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 3156 boolean_t *l2cache, boolean_t *log, boolean_t return_parent) 3157 { 3158 char *end; 3159 nvlist_t *nvroot, *search, *ret; 3160 uint64_t guid; 3161 boolean_t __avail_spare, __l2cache, __log; 3162 3163 search = fnvlist_alloc(); 3164 3165 guid = strtoull(path, &end, 0); 3166 if (guid != 0 && *end == '\0') { 3167 fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid); 3168 } else if (zpool_vdev_is_interior(path)) { 3169 fnvlist_add_string(search, ZPOOL_CONFIG_TYPE, path); 3170 } else { 3171 fnvlist_add_string(search, ZPOOL_CONFIG_PATH, path); 3172 } 3173 3174 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config, 3175 ZPOOL_CONFIG_VDEV_TREE); 3176 3177 /* 3178 * User can pass NULL for avail_spare, l2cache, and log, but 3179 * we still need to provide variables to vdev_to_nvlist_iter(), so 3180 * just point them to junk variables here. 3181 */ 3182 if (!avail_spare) 3183 avail_spare = &__avail_spare; 3184 if (!l2cache) 3185 l2cache = &__l2cache; 3186 if (!log) 3187 log = &__log; 3188 3189 *avail_spare = B_FALSE; 3190 *l2cache = B_FALSE; 3191 if (log != NULL) 3192 *log = B_FALSE; 3193 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log, 3194 return_parent); 3195 fnvlist_free(search); 3196 3197 return (ret); 3198 } 3199 3200 nvlist_t * 3201 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 3202 boolean_t *l2cache, boolean_t *log) 3203 { 3204 return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log, 3205 B_FALSE)); 3206 } 3207 3208 /* Given a vdev path, return its parent's nvlist */ 3209 nvlist_t * 3210 zpool_find_parent_vdev(zpool_handle_t *zhp, const char *path, 3211 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 3212 { 3213 return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log, 3214 B_TRUE)); 3215 } 3216 3217 /* 3218 * Convert a vdev path to a GUID. Returns GUID or 0 on error. 3219 * 3220 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it 3221 * if the VDEV is a spare, l2cache, or log device. If they're NULL then 3222 * ignore them. 
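 *
 * Illustrative fragment (hypothetical path; assumes 'zhp' is an open
 * pool handle):
 *
 *	boolean_t is_log = B_FALSE;
 *	uint64_t guid;
 *
 *	guid = zpool_vdev_path_to_guid_impl(zhp, "/dev/sda1",
 *	    NULL, NULL, &is_log);
 *
 * A zero return means no vdev with that path exists in this pool.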
3223 */ 3224 static uint64_t 3225 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path, 3226 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log) 3227 { 3228 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE; 3229 nvlist_t *tgt; 3230 3231 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache, 3232 &log)) == NULL) 3233 return (0); 3234 3235 if (is_spare != NULL) 3236 *is_spare = spare; 3237 if (is_l2cache != NULL) 3238 *is_l2cache = l2cache; 3239 if (is_log != NULL) 3240 *is_log = log; 3241 3242 return (fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID)); 3243 } 3244 3245 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. */ 3246 uint64_t 3247 zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path) 3248 { 3249 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL)); 3250 } 3251 3252 /* 3253 * Bring the specified vdev online. The 'flags' parameter is a set of the 3254 * ZFS_ONLINE_* flags. 3255 */ 3256 int 3257 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 3258 vdev_state_t *newstate) 3259 { 3260 zfs_cmd_t zc = {"\0"}; 3261 char errbuf[ERRBUFLEN]; 3262 nvlist_t *tgt; 3263 boolean_t avail_spare, l2cache, islog; 3264 libzfs_handle_t *hdl = zhp->zpool_hdl; 3265 3266 if (flags & ZFS_ONLINE_EXPAND) { 3267 (void) snprintf(errbuf, sizeof (errbuf), 3268 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 3269 } else { 3270 (void) snprintf(errbuf, sizeof (errbuf), 3271 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 3272 } 3273 3274 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3275 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3276 &islog)) == NULL) 3277 return (zfs_error(hdl, EZFS_NODEVICE, errbuf)); 3278 3279 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 3280 3281 if (!(flags & ZFS_ONLINE_SPARE) && avail_spare) 3282 return (zfs_error(hdl, EZFS_ISSPARE, errbuf)); 3283 3284 #ifndef __FreeBSD__ 3285 const char *pathname; 3286 if ((flags & ZFS_ONLINE_EXPAND || 3287 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) && 3288 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) { 3289 uint64_t wholedisk = 0; 3290 3291 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 3292 &wholedisk); 3293 3294 /* 3295 * XXX - L2ARC 1.0 devices can't support expansion. 3296 */ 3297 if (l2cache) { 3298 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3299 "cannot expand cache devices")); 3300 return (zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf)); 3301 } 3302 3303 if (wholedisk) { 3304 const char *fullpath = path; 3305 char buf[MAXPATHLEN]; 3306 int error; 3307 3308 if (path[0] != '/') { 3309 error = zfs_resolve_shortname(path, buf, 3310 sizeof (buf)); 3311 if (error != 0) 3312 return (zfs_error(hdl, EZFS_NODEVICE, 3313 errbuf)); 3314 3315 fullpath = buf; 3316 } 3317 3318 error = zpool_relabel_disk(hdl, fullpath, errbuf); 3319 if (error != 0) 3320 return (error); 3321 } 3322 } 3323 #endif 3324 3325 zc.zc_cookie = VDEV_STATE_ONLINE; 3326 zc.zc_obj = flags; 3327 3328 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 3329 if (errno == EINVAL) { 3330 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 3331 "from this pool into a new one. 
Use '%s' " 3332 "instead"), "zpool detach"); 3333 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, errbuf)); 3334 } 3335 return (zpool_standard_error(hdl, errno, errbuf)); 3336 } 3337 3338 *newstate = zc.zc_cookie; 3339 return (0); 3340 } 3341 3342 /* 3343 * Take the specified vdev offline 3344 */ 3345 int 3346 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 3347 { 3348 zfs_cmd_t zc = {"\0"}; 3349 char errbuf[ERRBUFLEN]; 3350 nvlist_t *tgt; 3351 boolean_t avail_spare, l2cache; 3352 libzfs_handle_t *hdl = zhp->zpool_hdl; 3353 3354 (void) snprintf(errbuf, sizeof (errbuf), 3355 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 3356 3357 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3358 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3359 NULL)) == NULL) 3360 return (zfs_error(hdl, EZFS_NODEVICE, errbuf)); 3361 3362 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 3363 3364 if (avail_spare) 3365 return (zfs_error(hdl, EZFS_ISSPARE, errbuf)); 3366 3367 zc.zc_cookie = VDEV_STATE_OFFLINE; 3368 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0; 3369 3370 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 3371 return (0); 3372 3373 switch (errno) { 3374 case EBUSY: 3375 3376 /* 3377 * There are no other replicas of this device. 3378 */ 3379 return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf)); 3380 3381 case EEXIST: 3382 /* 3383 * The log device has unplayed logs 3384 */ 3385 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, errbuf)); 3386 3387 default: 3388 return (zpool_standard_error(hdl, errno, errbuf)); 3389 } 3390 } 3391 3392 /* 3393 * Remove the specified vdev asynchronously from the configuration, so 3394 * that it may come ONLINE if reinserted. This is called from zed on 3395 * Udev remove event. 3396 * Note: We also have a similar function zpool_vdev_remove() that 3397 * removes the vdev from the pool. 3398 */ 3399 int 3400 zpool_vdev_remove_wanted(zpool_handle_t *zhp, const char *path) 3401 { 3402 zfs_cmd_t zc = {"\0"}; 3403 char errbuf[ERRBUFLEN]; 3404 nvlist_t *tgt; 3405 boolean_t avail_spare, l2cache; 3406 libzfs_handle_t *hdl = zhp->zpool_hdl; 3407 3408 (void) snprintf(errbuf, sizeof (errbuf), 3409 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3410 3411 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3412 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3413 NULL)) == NULL) 3414 return (zfs_error(hdl, EZFS_NODEVICE, errbuf)); 3415 3416 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 3417 3418 zc.zc_cookie = VDEV_STATE_REMOVED; 3419 3420 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 3421 return (0); 3422 3423 return (zpool_standard_error(hdl, errno, errbuf)); 3424 } 3425 3426 /* 3427 * Mark the given vdev faulted. 3428 */ 3429 int 3430 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 3431 { 3432 zfs_cmd_t zc = {"\0"}; 3433 char errbuf[ERRBUFLEN]; 3434 libzfs_handle_t *hdl = zhp->zpool_hdl; 3435 3436 (void) snprintf(errbuf, sizeof (errbuf), 3437 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid); 3438 3439 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3440 zc.zc_guid = guid; 3441 zc.zc_cookie = VDEV_STATE_FAULTED; 3442 zc.zc_obj = aux; 3443 3444 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 3445 return (0); 3446 3447 switch (errno) { 3448 case EBUSY: 3449 3450 /* 3451 * There are no other replicas of this device. 
3452 */ 3453 return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf)); 3454 3455 default: 3456 return (zpool_standard_error(hdl, errno, errbuf)); 3457 } 3458 3459 } 3460 3461 /* 3462 * Generic set vdev state function 3463 */ 3464 static int 3465 zpool_vdev_set_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux, 3466 vdev_state_t state) 3467 { 3468 zfs_cmd_t zc = {"\0"}; 3469 char errbuf[ERRBUFLEN]; 3470 libzfs_handle_t *hdl = zhp->zpool_hdl; 3471 3472 (void) snprintf(errbuf, sizeof (errbuf), 3473 dgettext(TEXT_DOMAIN, "cannot set %s %llu"), 3474 zpool_state_to_name(state, aux), (u_longlong_t)guid); 3475 3476 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3477 zc.zc_guid = guid; 3478 zc.zc_cookie = state; 3479 zc.zc_obj = aux; 3480 3481 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 3482 return (0); 3483 3484 return (zpool_standard_error(hdl, errno, errbuf)); 3485 } 3486 3487 /* 3488 * Mark the given vdev degraded. 3489 */ 3490 int 3491 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 3492 { 3493 return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_DEGRADED)); 3494 } 3495 3496 /* 3497 * Mark the given vdev as in a removed state (as if the device does not exist). 3498 * 3499 * This is different than zpool_vdev_remove() which does a removal of a device 3500 * from the pool (but the device does exist). 3501 */ 3502 int 3503 zpool_vdev_set_removed_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 3504 { 3505 return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_REMOVED)); 3506 } 3507 3508 /* 3509 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 3510 * a hot spare. 3511 */ 3512 static boolean_t 3513 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 3514 { 3515 nvlist_t **child; 3516 uint_t c, children; 3517 3518 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 3519 &children) == 0) { 3520 const char *type = fnvlist_lookup_string(search, 3521 ZPOOL_CONFIG_TYPE); 3522 if ((strcmp(type, VDEV_TYPE_SPARE) == 0 || 3523 strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) && 3524 children == 2 && child[which] == tgt) 3525 return (B_TRUE); 3526 3527 for (c = 0; c < children; c++) 3528 if (is_replacing_spare(child[c], tgt, which)) 3529 return (B_TRUE); 3530 } 3531 3532 return (B_FALSE); 3533 } 3534 3535 /* 3536 * Attach new_disk (fully described by nvroot) to old_disk. 3537 * If 'replacing' is specified, the new disk will replace the old one. 
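 *
 * Illustrative replacement sketch (hypothetical disk names; 'nvroot',
 * a root vdev nvlist with exactly one child describing the new disk,
 * must be built by the caller and is elided here, as is error handling):
 *
 *	if (zpool_vdev_attach(zhp, "/dev/sda", "/dev/sdb", nvroot,
 *	    B_TRUE, B_FALSE) != 0)
 *		return (-1);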
3538 */ 3539 int 3540 zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk, 3541 const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild) 3542 { 3543 zfs_cmd_t zc = {"\0"}; 3544 char errbuf[ERRBUFLEN]; 3545 int ret; 3546 nvlist_t *tgt; 3547 boolean_t avail_spare, l2cache, islog; 3548 uint64_t val; 3549 char *newname; 3550 const char *type; 3551 nvlist_t **child; 3552 uint_t children; 3553 nvlist_t *config_root; 3554 libzfs_handle_t *hdl = zhp->zpool_hdl; 3555 3556 if (replacing) 3557 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 3558 "cannot replace %s with %s"), old_disk, new_disk); 3559 else 3560 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 3561 "cannot attach %s to %s"), new_disk, old_disk); 3562 3563 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3564 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 3565 &islog)) == NULL) 3566 return (zfs_error(hdl, EZFS_NODEVICE, errbuf)); 3567 3568 if (avail_spare) 3569 return (zfs_error(hdl, EZFS_ISSPARE, errbuf)); 3570 3571 if (l2cache) 3572 return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf)); 3573 3574 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 3575 zc.zc_cookie = replacing; 3576 zc.zc_simple = rebuild; 3577 3578 if (rebuild && 3579 zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) { 3580 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3581 "the loaded zfs module doesn't support device rebuilds")); 3582 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3583 } 3584 3585 type = fnvlist_lookup_string(tgt, ZPOOL_CONFIG_TYPE); 3586 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 && 3587 zfeature_lookup_guid("org.openzfs:raidz_expansion", NULL) != 0) { 3588 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3589 "the loaded zfs module doesn't support raidz expansion")); 3590 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3591 } 3592 3593 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3594 &child, &children) != 0 || children != 1) { 3595 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3596 "new device must be a single disk")); 3597 return (zfs_error(hdl, EZFS_INVALCONFIG, errbuf)); 3598 } 3599 3600 config_root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 3601 ZPOOL_CONFIG_VDEV_TREE); 3602 3603 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL) 3604 return (-1); 3605 3606 /* 3607 * If the target is a hot spare that has been swapped in, we can only 3608 * replace it with another hot spare. 3609 */ 3610 if (replacing && 3611 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 3612 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 3613 NULL) == NULL || !avail_spare) && 3614 is_replacing_spare(config_root, tgt, 1)) { 3615 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3616 "can only be replaced by another hot spare")); 3617 free(newname); 3618 return (zfs_error(hdl, EZFS_BADTARGET, errbuf)); 3619 } 3620 3621 free(newname); 3622 3623 zcmd_write_conf_nvlist(hdl, &zc, nvroot); 3624 3625 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 3626 3627 zcmd_free_nvlists(&zc); 3628 3629 if (ret == 0) 3630 return (0); 3631 3632 switch (errno) { 3633 case ENOTSUP: 3634 /* 3635 * Can't attach to or replace this type of vdev. 
3636 */ 3637 if (replacing) { 3638 uint64_t version = zpool_get_prop_int(zhp, 3639 ZPOOL_PROP_VERSION, NULL); 3640 3641 if (islog) { 3642 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3643 "cannot replace a log with a spare")); 3644 } else if (rebuild) { 3645 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3646 "only mirror and dRAID vdevs support " 3647 "sequential reconstruction")); 3648 } else if (zpool_is_draid_spare(new_disk)) { 3649 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3650 "dRAID spares can only replace child " 3651 "devices in their parent's dRAID vdev")); 3652 } else if (version >= SPA_VERSION_MULTI_REPLACE) { 3653 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3654 "already in replacing/spare config; wait " 3655 "for completion or use 'zpool detach'")); 3656 } else { 3657 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3658 "cannot replace a replacing device")); 3659 } 3660 } else if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) { 3661 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3662 "raidz_expansion feature must be enabled " 3663 "in order to attach a device to raidz")); 3664 } else { 3665 char status[64] = {0}; 3666 zpool_prop_get_feature(zhp, 3667 "feature@device_rebuild", status, 63); 3668 if (rebuild && 3669 strncmp(status, ZFS_FEATURE_DISABLED, 64) == 0) { 3670 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3671 "device_rebuild feature must be enabled " 3672 "in order to use sequential " 3673 "reconstruction")); 3674 } else { 3675 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3676 "can only attach to mirrors and top-level " 3677 "disks")); 3678 } 3679 } 3680 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf); 3681 break; 3682 3683 case EINVAL: 3684 /* 3685 * The new device must be a single disk. 3686 */ 3687 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3688 "new device must be a single disk")); 3689 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 3690 break; 3691 3692 case EBUSY: 3693 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 3694 new_disk); 3695 (void) zfs_error(hdl, EZFS_BADDEV, errbuf); 3696 break; 3697 3698 case EOVERFLOW: 3699 /* 3700 * The new device is too small. 3701 */ 3702 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3703 "device is too small")); 3704 (void) zfs_error(hdl, EZFS_BADDEV, errbuf); 3705 break; 3706 3707 case EDOM: 3708 /* 3709 * The new device has a different optimal sector size. 3710 */ 3711 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3712 "new device has a different optimal sector size; use the " 3713 "option '-o ashift=N' to override the optimal size")); 3714 (void) zfs_error(hdl, EZFS_BADDEV, errbuf); 3715 break; 3716 3717 case ENAMETOOLONG: 3718 /* 3719 * The resulting top-level vdev spec won't fit in the label. 
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
		break;

	case ENXIO:
		/*
		 * The existing raidz vdev has offline children
		 */
		if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "raidz vdev has devices that are offline or "
			    "being replaced"));
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}
		break;

	case EADDRINUSE:
		/*
		 * The boot reserved area is already being used (FreeBSD)
		 */
		if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "the reserved boot area needed for the expansion "
			    "is already being used by a boot loader"));
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}
		break;

	case ZFS_ERR_ASHIFT_MISMATCH:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "The new device cannot have a higher alignment requirement "
		    "than the top-level vdev."));
		(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
		break;
	default:
		(void) zpool_standard_error(hdl, errno, errbuf);
	}

	return (-1);
}

/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, errbuf));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));

	zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, errbuf);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, errbuf);
	}

	return (-1);
}

/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool. The schild array contains a list of disks that the
 * user specified on the command line. We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry. Otherwise we return -1.
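 *
 * For example, if mchild holds the children of a two-way mirror of
 * /dev/sda and /dev/sdb, and the user named /dev/sdb on the command
 * line, this returns 1 (hypothetical device names, for illustration).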
3832 */ 3833 static int 3834 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 3835 nvlist_t **schild, uint_t schildren) 3836 { 3837 uint_t mc; 3838 3839 for (mc = 0; mc < mchildren; mc++) { 3840 uint_t sc; 3841 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 3842 mchild[mc], 0); 3843 3844 for (sc = 0; sc < schildren; sc++) { 3845 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 3846 schild[sc], 0); 3847 boolean_t result = (strcmp(mpath, spath) == 0); 3848 3849 free(spath); 3850 if (result) { 3851 free(mpath); 3852 return (mc); 3853 } 3854 } 3855 3856 free(mpath); 3857 } 3858 3859 return (-1); 3860 } 3861 3862 /* 3863 * Split a mirror pool. If newroot points to null, then a new nvlist 3864 * is generated and it is the responsibility of the caller to free it. 3865 */ 3866 int 3867 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 3868 nvlist_t *props, splitflags_t flags) 3869 { 3870 zfs_cmd_t zc = {"\0"}; 3871 char errbuf[ERRBUFLEN]; 3872 const char *bias; 3873 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 3874 nvlist_t **varray = NULL, *zc_props = NULL; 3875 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 3876 libzfs_handle_t *hdl = zhp->zpool_hdl; 3877 uint64_t vers, readonly = B_FALSE; 3878 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 3879 int retval = 0; 3880 3881 (void) snprintf(errbuf, sizeof (errbuf), 3882 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 3883 3884 if (!zpool_name_valid(hdl, B_FALSE, newname)) 3885 return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); 3886 3887 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 3888 (void) fprintf(stderr, gettext("Internal error: unable to " 3889 "retrieve pool configuration\n")); 3890 return (-1); 3891 } 3892 3893 tree = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 3894 vers = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION); 3895 3896 if (props) { 3897 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 3898 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 3899 props, vers, flags, errbuf)) == NULL) 3900 return (-1); 3901 (void) nvlist_lookup_uint64(zc_props, 3902 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 3903 if (readonly) { 3904 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3905 "property %s can only be set at import time"), 3906 zpool_prop_to_name(ZPOOL_PROP_READONLY)); 3907 return (-1); 3908 } 3909 } 3910 3911 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 3912 &children) != 0) { 3913 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3914 "Source pool is missing vdev tree")); 3915 nvlist_free(zc_props); 3916 return (-1); 3917 } 3918 3919 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 3920 vcount = 0; 3921 3922 if (*newroot == NULL || 3923 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 3924 &newchild, &newchildren) != 0) 3925 newchildren = 0; 3926 3927 for (c = 0; c < children; c++) { 3928 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 3929 boolean_t is_special = B_FALSE, is_dedup = B_FALSE; 3930 const char *type; 3931 nvlist_t **mchild, *vdev; 3932 uint_t mchildren; 3933 int entry; 3934 3935 /* 3936 * Unlike cache & spares, slogs are stored in the 3937 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 
3938 */ 3939 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 3940 &is_log); 3941 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 3942 &is_hole); 3943 if (is_log || is_hole) { 3944 /* 3945 * Create a hole vdev and put it in the config. 3946 */ 3947 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 3948 goto out; 3949 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 3950 VDEV_TYPE_HOLE) != 0) 3951 goto out; 3952 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 3953 1) != 0) 3954 goto out; 3955 if (lastlog == 0) 3956 lastlog = vcount; 3957 varray[vcount++] = vdev; 3958 continue; 3959 } 3960 lastlog = 0; 3961 type = fnvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE); 3962 3963 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) { 3964 vdev = child[c]; 3965 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 3966 goto out; 3967 continue; 3968 } else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 3969 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3970 "Source pool must be composed only of mirrors\n")); 3971 retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 3972 goto out; 3973 } 3974 3975 if (nvlist_lookup_string(child[c], 3976 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) { 3977 if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) 3978 is_special = B_TRUE; 3979 else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0) 3980 is_dedup = B_TRUE; 3981 } 3982 verify(nvlist_lookup_nvlist_array(child[c], 3983 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 3984 3985 /* find or add an entry for this top-level vdev */ 3986 if (newchildren > 0 && 3987 (entry = find_vdev_entry(zhp, mchild, mchildren, 3988 newchild, newchildren)) >= 0) { 3989 /* We found a disk that the user specified. */ 3990 vdev = mchild[entry]; 3991 ++found; 3992 } else { 3993 /* User didn't specify a disk for this vdev. */ 3994 vdev = mchild[mchildren - 1]; 3995 } 3996 3997 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 3998 goto out; 3999 4000 if (flags.dryrun != 0) { 4001 if (is_dedup == B_TRUE) { 4002 if (nvlist_add_string(varray[vcount - 1], 4003 ZPOOL_CONFIG_ALLOCATION_BIAS, 4004 VDEV_ALLOC_BIAS_DEDUP) != 0) 4005 goto out; 4006 } else if (is_special == B_TRUE) { 4007 if (nvlist_add_string(varray[vcount - 1], 4008 ZPOOL_CONFIG_ALLOCATION_BIAS, 4009 VDEV_ALLOC_BIAS_SPECIAL) != 0) 4010 goto out; 4011 } 4012 } 4013 } 4014 4015 /* did we find every disk the user specified? */ 4016 if (found != newchildren) { 4017 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 4018 "include at most one disk from each mirror")); 4019 retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4020 goto out; 4021 } 4022 4023 /* Prepare the nvlist for populating. */ 4024 if (*newroot == NULL) { 4025 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 4026 goto out; 4027 freelist = B_TRUE; 4028 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 4029 VDEV_TYPE_ROOT) != 0) 4030 goto out; 4031 } else { 4032 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 4033 } 4034 4035 /* Add all the children we found */ 4036 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 4037 (const nvlist_t **)varray, lastlog == 0 ? vcount : lastlog) != 0) 4038 goto out; 4039 4040 /* 4041 * If we're just doing a dry run, exit now with success. 
4042 */ 4043 if (flags.dryrun) { 4044 memory_err = B_FALSE; 4045 freelist = B_FALSE; 4046 goto out; 4047 } 4048 4049 /* now build up the config list & call the ioctl */ 4050 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 4051 goto out; 4052 4053 if (nvlist_add_nvlist(newconfig, 4054 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 4055 nvlist_add_string(newconfig, 4056 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 4057 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 4058 goto out; 4059 4060 /* 4061 * The new pool is automatically part of the namespace unless we 4062 * explicitly export it. 4063 */ 4064 if (!flags.import) 4065 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 4066 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4067 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 4068 zcmd_write_conf_nvlist(hdl, &zc, newconfig); 4069 if (zc_props != NULL) 4070 zcmd_write_src_nvlist(hdl, &zc, zc_props); 4071 4072 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 4073 retval = zpool_standard_error(hdl, errno, errbuf); 4074 goto out; 4075 } 4076 4077 freelist = B_FALSE; 4078 memory_err = B_FALSE; 4079 4080 out: 4081 if (varray != NULL) { 4082 int v; 4083 4084 for (v = 0; v < vcount; v++) 4085 nvlist_free(varray[v]); 4086 free(varray); 4087 } 4088 zcmd_free_nvlists(&zc); 4089 nvlist_free(zc_props); 4090 nvlist_free(newconfig); 4091 if (freelist) { 4092 nvlist_free(*newroot); 4093 *newroot = NULL; 4094 } 4095 4096 if (retval != 0) 4097 return (retval); 4098 4099 if (memory_err) 4100 return (no_memory(hdl)); 4101 4102 return (0); 4103 } 4104 4105 /* 4106 * Remove the given device. 4107 */ 4108 int 4109 zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 4110 { 4111 zfs_cmd_t zc = {"\0"}; 4112 char errbuf[ERRBUFLEN]; 4113 nvlist_t *tgt; 4114 boolean_t avail_spare, l2cache, islog; 4115 libzfs_handle_t *hdl = zhp->zpool_hdl; 4116 uint64_t version; 4117 4118 (void) snprintf(errbuf, sizeof (errbuf), 4119 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 4120 4121 if (zpool_is_draid_spare(path)) { 4122 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4123 "dRAID spares cannot be removed")); 4124 return (zfs_error(hdl, EZFS_NODEVICE, errbuf)); 4125 } 4126 4127 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4128 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 4129 &islog)) == NULL) 4130 return (zfs_error(hdl, EZFS_NODEVICE, errbuf)); 4131 4132 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 4133 if (islog && version < SPA_VERSION_HOLES) { 4134 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4135 "pool must be upgraded to support log removal")); 4136 return (zfs_error(hdl, EZFS_BADVERSION, errbuf)); 4137 } 4138 4139 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 4140 4141 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 4142 return (0); 4143 4144 switch (errno) { 4145 4146 case EALREADY: 4147 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4148 "removal for this vdev is already in progress.")); 4149 (void) zfs_error(hdl, EZFS_BUSY, errbuf); 4150 break; 4151 4152 case EINVAL: 4153 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4154 "invalid config; all top-level vdevs must " 4155 "have the same sector size and not be raidz.")); 4156 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4157 break; 4158 4159 case EBUSY: 4160 if (islog) { 4161 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4162 "Mount encrypted datasets to replay logs.")); 4163 } else { 4164 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4165 "Pool busy; removal may already be in 
progress")); 4166 } 4167 (void) zfs_error(hdl, EZFS_BUSY, errbuf); 4168 break; 4169 4170 case EACCES: 4171 if (islog) { 4172 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4173 "Mount encrypted datasets to replay logs.")); 4174 (void) zfs_error(hdl, EZFS_BUSY, errbuf); 4175 } else { 4176 (void) zpool_standard_error(hdl, errno, errbuf); 4177 } 4178 break; 4179 4180 default: 4181 (void) zpool_standard_error(hdl, errno, errbuf); 4182 } 4183 return (-1); 4184 } 4185 4186 int 4187 zpool_vdev_remove_cancel(zpool_handle_t *zhp) 4188 { 4189 zfs_cmd_t zc = {{0}}; 4190 char errbuf[ERRBUFLEN]; 4191 libzfs_handle_t *hdl = zhp->zpool_hdl; 4192 4193 (void) snprintf(errbuf, sizeof (errbuf), 4194 dgettext(TEXT_DOMAIN, "cannot cancel removal")); 4195 4196 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4197 zc.zc_cookie = 1; 4198 4199 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 4200 return (0); 4201 4202 return (zpool_standard_error(hdl, errno, errbuf)); 4203 } 4204 4205 int 4206 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path, 4207 uint64_t *sizep) 4208 { 4209 char errbuf[ERRBUFLEN]; 4210 nvlist_t *tgt; 4211 boolean_t avail_spare, l2cache, islog; 4212 libzfs_handle_t *hdl = zhp->zpool_hdl; 4213 4214 (void) snprintf(errbuf, sizeof (errbuf), 4215 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"), 4216 path); 4217 4218 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 4219 &islog)) == NULL) 4220 return (zfs_error(hdl, EZFS_NODEVICE, errbuf)); 4221 4222 if (avail_spare || l2cache || islog) { 4223 *sizep = 0; 4224 return (0); 4225 } 4226 4227 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) { 4228 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4229 "indirect size not available")); 4230 return (zfs_error(hdl, EINVAL, errbuf)); 4231 } 4232 return (0); 4233 } 4234 4235 /* 4236 * Clear the errors for the pool, or the particular device if specified. 4237 */ 4238 int 4239 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 4240 { 4241 zfs_cmd_t zc = {"\0"}; 4242 char errbuf[ERRBUFLEN]; 4243 nvlist_t *tgt; 4244 zpool_load_policy_t policy; 4245 boolean_t avail_spare, l2cache; 4246 libzfs_handle_t *hdl = zhp->zpool_hdl; 4247 nvlist_t *nvi = NULL; 4248 int error; 4249 4250 if (path) 4251 (void) snprintf(errbuf, sizeof (errbuf), 4252 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 4253 path); 4254 else 4255 (void) snprintf(errbuf, sizeof (errbuf), 4256 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 4257 zhp->zpool_name); 4258 4259 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4260 if (path) { 4261 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 4262 &l2cache, NULL)) == NULL) 4263 return (zfs_error(hdl, EZFS_NODEVICE, errbuf)); 4264 4265 /* 4266 * Don't allow error clearing for hot spares. Do allow 4267 * error clearing for l2cache devices. 
4268 */ 4269 if (avail_spare) 4270 return (zfs_error(hdl, EZFS_ISSPARE, errbuf)); 4271 4272 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 4273 } 4274 4275 zpool_get_load_policy(rewindnvl, &policy); 4276 zc.zc_cookie = policy.zlp_rewind; 4277 4278 zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2); 4279 zcmd_write_src_nvlist(hdl, &zc, rewindnvl); 4280 4281 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 4282 errno == ENOMEM) 4283 zcmd_expand_dst_nvlist(hdl, &zc); 4284 4285 if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) && 4286 errno != EPERM && errno != EACCES)) { 4287 if (policy.zlp_rewind & 4288 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 4289 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 4290 zpool_rewind_exclaim(hdl, zc.zc_name, 4291 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), 4292 nvi); 4293 nvlist_free(nvi); 4294 } 4295 zcmd_free_nvlists(&zc); 4296 return (0); 4297 } 4298 4299 zcmd_free_nvlists(&zc); 4300 return (zpool_standard_error(hdl, errno, errbuf)); 4301 } 4302 4303 /* 4304 * Similar to zpool_clear(), but takes a GUID (used by fmd). 4305 */ 4306 int 4307 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 4308 { 4309 zfs_cmd_t zc = {"\0"}; 4310 char errbuf[ERRBUFLEN]; 4311 libzfs_handle_t *hdl = zhp->zpool_hdl; 4312 4313 (void) snprintf(errbuf, sizeof (errbuf), 4314 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 4315 (u_longlong_t)guid); 4316 4317 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4318 zc.zc_guid = guid; 4319 zc.zc_cookie = ZPOOL_NO_REWIND; 4320 4321 if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0) 4322 return (0); 4323 4324 return (zpool_standard_error(hdl, errno, errbuf)); 4325 } 4326 4327 /* 4328 * Change the GUID for a pool. 4329 * 4330 * Similar to zpool_reguid(), but may take a GUID. 4331 * 4332 * If the guid argument is NULL, then no GUID is passed in the nvlist to the 4333 * ioctl(). 4334 */ 4335 int 4336 zpool_set_guid(zpool_handle_t *zhp, const uint64_t *guid) 4337 { 4338 char errbuf[ERRBUFLEN]; 4339 libzfs_handle_t *hdl = zhp->zpool_hdl; 4340 nvlist_t *nvl = NULL; 4341 zfs_cmd_t zc = {"\0"}; 4342 int error = -1; 4343 4344 if (guid != NULL) { 4345 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 4346 return (no_memory(hdl)); 4347 4348 if (nvlist_add_uint64(nvl, ZPOOL_REGUID_GUID, *guid) != 0) { 4349 nvlist_free(nvl); 4350 return (no_memory(hdl)); 4351 } 4352 4353 zcmd_write_src_nvlist(hdl, &zc, nvl); 4354 } 4355 4356 (void) snprintf(errbuf, sizeof (errbuf), 4357 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 4358 4359 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4360 error = zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc); 4361 /* free the request nvlists before checking for errors */ 4362 if (guid != NULL) { 4363 zcmd_free_nvlists(&zc); 4364 nvlist_free(nvl); 4365 } 4366 if (error) 4367 return (zpool_standard_error(hdl, errno, errbuf)); 4368 return (0); 4369 } 4370 4371 /* 4372 * Change the GUID for a pool. 4373 */ 4374 int 4375 zpool_reguid(zpool_handle_t *zhp) 4376 { 4377 return (zpool_set_guid(zhp, NULL)); 4378 } 4379 4380 /* 4381 * Reopen the pool.
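 *
 * A minimal usage sketch (illustrative only; 'g_zfs' is assumed to be
 * an open libzfs handle), reopening every imported pool via
 * zpool_iter():
 *
 *	boolean_t scrub_restart = B_TRUE;
 *	(void) zpool_iter(g_zfs, zpool_reopen_one, &scrub_restart);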
4382 */ 4383 int 4384 zpool_reopen_one(zpool_handle_t *zhp, void *data) 4385 { 4386 libzfs_handle_t *hdl = zpool_get_handle(zhp); 4387 const char *pool_name = zpool_get_name(zhp); 4388 boolean_t *scrub_restart = data; 4389 int error; 4390 4391 error = lzc_reopen(pool_name, *scrub_restart); 4392 if (error) { 4393 return (zpool_standard_error_fmt(hdl, error, 4394 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name)); 4395 } 4396 4397 return (0); 4398 } 4399 4400 /* call into libzfs_core to execute the sync IOCTL per pool */ 4401 int 4402 zpool_sync_one(zpool_handle_t *zhp, void *data) 4403 { 4404 int ret; 4405 libzfs_handle_t *hdl = zpool_get_handle(zhp); 4406 const char *pool_name = zpool_get_name(zhp); 4407 boolean_t *force = data; 4408 nvlist_t *innvl = fnvlist_alloc(); 4409 4410 fnvlist_add_boolean_value(innvl, "force", *force); 4411 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) { 4412 nvlist_free(innvl); 4413 return (zpool_standard_error_fmt(hdl, ret, 4414 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name)); 4415 } 4416 nvlist_free(innvl); 4417 4418 return (0); 4419 } 4420 4421 #define PATH_BUF_LEN 64 4422 4423 /* 4424 * Given a vdev, return the name to display in iostat. If the vdev has a path, 4425 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 4426 * We also check if this is a whole disk, in which case we strip off the 4427 * trailing 's0' slice name. 4428 * 4429 * This routine is also responsible for identifying when disks have been 4430 * reconfigured in a new location. The kernel will have opened the device by 4431 * devid, but the path will still refer to the old location. To catch this, we 4432 * first do a path -> devid translation (which is fast for the common case). If 4433 * the devid matches, we're done. If not, we do a reverse devid -> path 4434 * translation and issue the appropriate ioctl() to update the path of the vdev. 4435 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 4436 * of these checks. 4437 */ 4438 char * 4439 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 4440 int name_flags) 4441 { 4442 const char *type, *tpath; 4443 const char *path; 4444 uint64_t value; 4445 char buf[PATH_BUF_LEN]; 4446 char tmpbuf[PATH_BUF_LEN * 2]; 4447 4448 /* 4449 * vdev_name will be "root"/"root-0" for the root vdev, but it is the 4450 * zpool name that will be displayed to the user. 4451 */ 4452 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE); 4453 if (zhp != NULL && strcmp(type, "root") == 0) 4454 return (zfs_strdup(hdl, zpool_get_name(zhp))); 4455 4456 if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_PATH")) 4457 name_flags |= VDEV_NAME_PATH; 4458 if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_GUID")) 4459 name_flags |= VDEV_NAME_GUID; 4460 if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_FOLLOW_LINKS")) 4461 name_flags |= VDEV_NAME_FOLLOW_LINKS; 4462 4463 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 || 4464 name_flags & VDEV_NAME_GUID) { 4465 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value); 4466 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value); 4467 path = buf; 4468 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tpath) == 0) { 4469 path = tpath; 4470 4471 if (name_flags & VDEV_NAME_FOLLOW_LINKS) { 4472 char *rp = realpath(path, NULL); 4473 if (rp) { 4474 strlcpy(buf, rp, sizeof (buf)); 4475 path = buf; 4476 free(rp); 4477 } 4478 } 4479 4480 /* 4481 * For a block device only use the name. 
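 * (e.g. a path of "/dev/sda" is displayed as just "sda")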
4482 */ 4483 if ((strcmp(type, VDEV_TYPE_DISK) == 0) && 4484 !(name_flags & VDEV_NAME_PATH)) { 4485 path = zfs_strip_path(path); 4486 } 4487 4488 /* 4489 * Remove the partition from the path if this is a whole disk. 4490 */ 4491 if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 && 4492 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value) 4493 == 0 && value && !(name_flags & VDEV_NAME_PATH)) { 4494 return (zfs_strip_partition(path)); 4495 } 4496 } else { 4497 path = type; 4498 4499 /* 4500 * If it's a raidz device, we need to stick in the parity level. 4501 */ 4502 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 4503 value = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY); 4504 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 4505 (u_longlong_t)value); 4506 path = buf; 4507 } 4508 4509 /* 4510 * If it's a dRAID device, we add parity, groups, and spares. 4511 */ 4512 if (strcmp(path, VDEV_TYPE_DRAID) == 0) { 4513 uint64_t ndata, nparity, nspares; 4514 nvlist_t **child; 4515 uint_t children; 4516 4517 verify(nvlist_lookup_nvlist_array(nv, 4518 ZPOOL_CONFIG_CHILDREN, &child, &children) == 0); 4519 nparity = fnvlist_lookup_uint64(nv, 4520 ZPOOL_CONFIG_NPARITY); 4521 ndata = fnvlist_lookup_uint64(nv, 4522 ZPOOL_CONFIG_DRAID_NDATA); 4523 nspares = fnvlist_lookup_uint64(nv, 4524 ZPOOL_CONFIG_DRAID_NSPARES); 4525 4526 path = zpool_draid_name(buf, sizeof (buf), ndata, 4527 nparity, nspares, children); 4528 } 4529 4530 /* 4531 * We identify each top-level vdev by using a <type-id> 4532 * naming convention. 4533 */ 4534 if (name_flags & VDEV_NAME_TYPE_ID) { 4535 uint64_t id = fnvlist_lookup_uint64(nv, 4536 ZPOOL_CONFIG_ID); 4537 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu", 4538 path, (u_longlong_t)id); 4539 path = tmpbuf; 4540 } 4541 } 4542 4543 return (zfs_strdup(hdl, path)); 4544 } 4545 4546 static int 4547 zbookmark_mem_compare(const void *a, const void *b) 4548 { 4549 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 4550 } 4551 4552 void 4553 zpool_add_propname(zpool_handle_t *zhp, const char *propname) 4554 { 4555 assert(zhp->zpool_n_propnames < ZHP_MAX_PROPNAMES); 4556 zhp->zpool_propnames[zhp->zpool_n_propnames] = propname; 4557 zhp->zpool_n_propnames++; 4558 } 4559 4560 /* 4561 * Retrieve the persistent error log, uniquify the members, and return to the 4562 * caller. 4563 */ 4564 int 4565 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 4566 { 4567 zfs_cmd_t zc = {"\0"}; 4568 libzfs_handle_t *hdl = zhp->zpool_hdl; 4569 zbookmark_phys_t *buf; 4570 uint64_t buflen = 10000; /* approx. 1MB of RAM */ 4571 4572 if (fnvlist_lookup_uint64(zhp->zpool_config, 4573 ZPOOL_CONFIG_ERRCOUNT) == 0) 4574 return (0); 4575 4576 /* 4577 * Retrieve the raw error list from the kernel. If it doesn't fit, 4578 * allocate a larger buffer and retry. 4579 */ 4580 (void) strcpy(zc.zc_name, zhp->zpool_name); 4581 for (;;) { 4582 buf = zfs_alloc(zhp->zpool_hdl, 4583 buflen * sizeof (zbookmark_phys_t)); 4584 zc.zc_nvlist_dst = (uintptr_t)buf; 4585 zc.zc_nvlist_dst_size = buflen; 4586 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG, 4587 &zc) != 0) { 4588 free(buf); 4589 if (errno == ENOMEM) { 4590 buflen *= 2; 4591 } else { 4592 return (zpool_standard_error_fmt(hdl, errno, 4593 dgettext(TEXT_DOMAIN, "errors: List of " 4594 "errors unavailable"))); 4595 } 4596 } else { 4597 break; 4598 } 4599 } 4600 4601 /* 4602 * Sort the resulting bookmarks. This is a little confusing due to the 4603 * implementation of ZFS_IOC_ERROR_LOG. 
The bookmarks are copied last 4604 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks 4605 * _not_ copied as part of the process. So we point the start of our 4606 * array appropriately and decrement the total number of elements. 4607 */ 4608 zbookmark_phys_t *zb = buf + zc.zc_nvlist_dst_size; 4609 uint64_t zblen = buflen - zc.zc_nvlist_dst_size; 4610 4611 qsort(zb, zblen, sizeof (zbookmark_phys_t), zbookmark_mem_compare); 4612 4613 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 4614 4615 /* 4616 * Fill in the nverrlistp with nvlists of dataset and object numbers. 4617 */ 4618 for (uint64_t i = 0; i < zblen; i++) { 4619 nvlist_t *nv; 4620 4621 /* ignoring zb_blkid and zb_level for now */ 4622 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 4623 zb[i-1].zb_object == zb[i].zb_object) 4624 continue; 4625 4626 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 4627 goto nomem; 4628 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 4629 zb[i].zb_objset) != 0) { 4630 nvlist_free(nv); 4631 goto nomem; 4632 } 4633 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 4634 zb[i].zb_object) != 0) { 4635 nvlist_free(nv); 4636 goto nomem; 4637 } 4638 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 4639 nvlist_free(nv); 4640 goto nomem; 4641 } 4642 nvlist_free(nv); 4643 } 4644 4645 free(buf); 4646 return (0); 4647 4648 nomem: 4649 free(buf); 4650 return (no_memory(zhp->zpool_hdl)); 4651 } 4652 4653 /* 4654 * Upgrade a ZFS pool to the latest on-disk version. 4655 */ 4656 int 4657 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 4658 { 4659 zfs_cmd_t zc = {"\0"}; 4660 libzfs_handle_t *hdl = zhp->zpool_hdl; 4661 4662 (void) strcpy(zc.zc_name, zhp->zpool_name); 4663 zc.zc_cookie = new_version; 4664 4665 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 4666 return (zpool_standard_error_fmt(hdl, errno, 4667 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 4668 zhp->zpool_name)); 4669 return (0); 4670 } 4671 4672 void 4673 zfs_save_arguments(int argc, char **argv, char *string, int len) 4674 { 4675 int i; 4676 4677 (void) strlcpy(string, zfs_basename(argv[0]), len); 4678 for (i = 1; i < argc; i++) { 4679 (void) strlcat(string, " ", len); 4680 (void) strlcat(string, argv[i], len); 4681 } 4682 } 4683 4684 int 4685 zpool_log_history(libzfs_handle_t *hdl, const char *message) 4686 { 4687 zfs_cmd_t zc = {"\0"}; 4688 nvlist_t *args; 4689 4690 args = fnvlist_alloc(); 4691 fnvlist_add_string(args, "message", message); 4692 zcmd_write_src_nvlist(hdl, &zc, args); 4693 int err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc); 4694 nvlist_free(args); 4695 zcmd_free_nvlists(&zc); 4696 return (err); 4697 } 4698 4699 /* 4700 * Perform ioctl to get some command history of a pool. 4701 * 4702 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 4703 * logical offset of the history buffer to start reading from. 4704 * 4705 * Upon return, 'off' is the next logical offset to read from and 4706 * 'len' is the actual number of bytes read into 'buf'.
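 *
 * Illustrative caller loop (a sketch of this contract, not library
 * code; 'buf' is assumed to be a caller-supplied buffer):
 *
 *	uint64_t off = 0, len;
 *	do {
 *		len = sizeof (buf);
 *		if (get_history(zhp, buf, &off, &len) != 0)
 *			break;
 *		(consume 'len' bytes of packed history records)
 *	} while (len != 0);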
4707 */ 4708 static int 4709 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 4710 { 4711 zfs_cmd_t zc = {"\0"}; 4712 libzfs_handle_t *hdl = zhp->zpool_hdl; 4713 4714 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4715 4716 zc.zc_history = (uint64_t)(uintptr_t)buf; 4717 zc.zc_history_len = *len; 4718 zc.zc_history_offset = *off; 4719 4720 if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 4721 switch (errno) { 4722 case EPERM: 4723 return (zfs_error_fmt(hdl, EZFS_PERM, 4724 dgettext(TEXT_DOMAIN, 4725 "cannot show history for pool '%s'"), 4726 zhp->zpool_name)); 4727 case ENOENT: 4728 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 4729 dgettext(TEXT_DOMAIN, "cannot get history for pool " 4730 "'%s'"), zhp->zpool_name)); 4731 case ENOTSUP: 4732 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 4733 dgettext(TEXT_DOMAIN, "cannot get history for pool " 4734 "'%s', pool must be upgraded"), zhp->zpool_name)); 4735 default: 4736 return (zpool_standard_error_fmt(hdl, errno, 4737 dgettext(TEXT_DOMAIN, 4738 "cannot get history for '%s'"), zhp->zpool_name)); 4739 } 4740 } 4741 4742 *len = zc.zc_history_len; 4743 *off = zc.zc_history_offset; 4744 4745 return (0); 4746 } 4747 4748 /* 4749 * Retrieve the command history of a pool. 4750 */ 4751 int 4752 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off, 4753 boolean_t *eof) 4754 { 4755 libzfs_handle_t *hdl = zhp->zpool_hdl; 4756 char *buf; 4757 int buflen = 128 * 1024; 4758 nvlist_t **records = NULL; 4759 uint_t numrecords = 0; 4760 int err = 0, i; 4761 uint64_t start = *off; 4762 4763 buf = zfs_alloc(hdl, buflen); 4764 4765 /* process about 1 MiB at a time */ 4766 while (*off - start < 1024 * 1024) { 4767 uint64_t bytes_read = buflen; 4768 uint64_t leftover; 4769 4770 if ((err = get_history(zhp, buf, off, &bytes_read)) != 0) 4771 break; 4772 4773 /* if nothing else was read in, we're at EOF, just return */ 4774 if (!bytes_read) { 4775 *eof = B_TRUE; 4776 break; 4777 } 4778 4779 if ((err = zpool_history_unpack(buf, bytes_read, 4780 &leftover, &records, &numrecords)) != 0) { 4781 zpool_standard_error_fmt(hdl, err, 4782 dgettext(TEXT_DOMAIN, 4783 "cannot get history for '%s'"), zhp->zpool_name); 4784 break; 4785 } 4786 *off -= leftover; 4787 if (leftover == bytes_read) { 4788 /* 4789 * no progress made, because buffer is not big enough 4790 * to hold this record; resize and retry. 4791 */ 4792 buflen *= 2; 4793 free(buf); 4794 buf = zfs_alloc(hdl, buflen); 4795 } 4796 } 4797 4798 free(buf); 4799 4800 if (!err) { 4801 *nvhisp = fnvlist_alloc(); 4802 fnvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 4803 (const nvlist_t **)records, numrecords); 4804 } 4805 for (i = 0; i < numrecords; i++) 4806 nvlist_free(records[i]); 4807 free(records); 4808 4809 return (err); 4810 } 4811 4812 /* 4813 * Retrieve the next event given the passed 'zevent_fd' file descriptor. 4814 * If there is a new event available 'nvp' will contain a newly allocated 4815 * nvlist and 'dropped' will be set to the number of missed events since 4816 * the last call to this function. When 'nvp' is set to NULL it indicates 4817 * no new events are available. In either case the function returns 0 and 4818 * it is up to the caller to free 'nvp'. In the case of a fatal error the 4819 * function will return a non-zero value. When the function is called in 4820 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed), 4821 * it will not return until a new event is available.
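 *
 * Illustrative consumer loop (a sketch; 'zevent_fd' would be a
 * descriptor obtained by opening ZFS_DEV):
 *
 *	nvlist_t *event;
 *	int dropped;
 *	while (zpool_events_next(hdl, &event, &dropped,
 *	    ZEVENT_NONBLOCK, zevent_fd) == 0 && event != NULL) {
 *		(inspect the event nvlist here)
 *		nvlist_free(event);
 *	}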
4822 */ 4823 int 4824 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp, 4825 int *dropped, unsigned flags, int zevent_fd) 4826 { 4827 zfs_cmd_t zc = {"\0"}; 4828 int error = 0; 4829 4830 *nvp = NULL; 4831 *dropped = 0; 4832 zc.zc_cleanup_fd = zevent_fd; 4833 4834 if (flags & ZEVENT_NONBLOCK) 4835 zc.zc_guid = ZEVENT_NONBLOCK; 4836 4837 zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE); 4838 4839 retry: 4840 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) { 4841 switch (errno) { 4842 case ESHUTDOWN: 4843 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 4844 dgettext(TEXT_DOMAIN, "zfs shutdown")); 4845 goto out; 4846 case ENOENT: 4847 /* Blocking error case should not occur */ 4848 if (!(flags & ZEVENT_NONBLOCK)) 4849 error = zpool_standard_error_fmt(hdl, errno, 4850 dgettext(TEXT_DOMAIN, "cannot get event")); 4851 4852 goto out; 4853 case ENOMEM: 4854 zcmd_expand_dst_nvlist(hdl, &zc); 4855 goto retry; 4856 default: 4857 error = zpool_standard_error_fmt(hdl, errno, 4858 dgettext(TEXT_DOMAIN, "cannot get event")); 4859 goto out; 4860 } 4861 } 4862 4863 error = zcmd_read_dst_nvlist(hdl, &zc, nvp); 4864 if (error != 0) 4865 goto out; 4866 4867 *dropped = (int)zc.zc_cookie; 4868 out: 4869 zcmd_free_nvlists(&zc); 4870 4871 return (error); 4872 } 4873 4874 /* 4875 * Clear all events. 4876 */ 4877 int 4878 zpool_events_clear(libzfs_handle_t *hdl, int *count) 4879 { 4880 zfs_cmd_t zc = {"\0"}; 4881 4882 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0) 4883 return (zpool_standard_error(hdl, errno, 4884 dgettext(TEXT_DOMAIN, "cannot clear events"))); 4885 4886 if (count != NULL) 4887 *count = (int)zc.zc_cookie; /* # of events cleared */ 4888 4889 return (0); 4890 } 4891 4892 /* 4893 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for 4894 * the passed zevent_fd file handle. On success zero is returned, 4895 * otherwise -1 is returned and hdl->libzfs_error is set to the errno. 
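 *
 * For example (illustrative), to replay the event log from the
 * beginning:
 *
 *	(void) zpool_events_seek(hdl, ZEVENT_SEEK_START, zevent_fd);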
4896 */ 4897 int 4898 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd) 4899 { 4900 zfs_cmd_t zc = {"\0"}; 4901 int error = 0; 4902 4903 zc.zc_guid = eid; 4904 zc.zc_cleanup_fd = zevent_fd; 4905 4906 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) { 4907 switch (errno) { 4908 case ENOENT: 4909 error = zfs_error_fmt(hdl, EZFS_NOENT, 4910 dgettext(TEXT_DOMAIN, "cannot get event")); 4911 break; 4912 4913 case ENOMEM: 4914 error = zfs_error_fmt(hdl, EZFS_NOMEM, 4915 dgettext(TEXT_DOMAIN, "cannot get event")); 4916 break; 4917 4918 default: 4919 error = zpool_standard_error_fmt(hdl, errno, 4920 dgettext(TEXT_DOMAIN, "cannot get event")); 4921 break; 4922 } 4923 } 4924 4925 return (error); 4926 } 4927 4928 static void 4929 zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 4930 char *pathname, size_t len, boolean_t always_unmounted) 4931 { 4932 zfs_cmd_t zc = {"\0"}; 4933 boolean_t mounted = B_FALSE; 4934 char *mntpnt = NULL; 4935 char dsname[ZFS_MAX_DATASET_NAME_LEN]; 4936 4937 if (dsobj == 0) { 4938 /* special case for the MOS */ 4939 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", 4940 (longlong_t)obj); 4941 return; 4942 } 4943 4944 /* get the dataset's name */ 4945 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4946 zc.zc_obj = dsobj; 4947 if (zfs_ioctl(zhp->zpool_hdl, 4948 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 4949 /* just write out a path of two object numbers */ 4950 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 4951 (longlong_t)dsobj, (longlong_t)obj); 4952 return; 4953 } 4954 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 4955 4956 /* find out if the dataset is mounted */ 4957 mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname, 4958 &mntpnt); 4959 4960 /* get the corrupted object's path */ 4961 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 4962 zc.zc_obj = obj; 4963 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH, 4964 &zc) == 0) { 4965 if (mounted) { 4966 (void) snprintf(pathname, len, "%s%s", mntpnt, 4967 zc.zc_value); 4968 } else { 4969 (void) snprintf(pathname, len, "%s:%s", 4970 dsname, zc.zc_value); 4971 } 4972 } else { 4973 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, 4974 (longlong_t)obj); 4975 } 4976 free(mntpnt); 4977 } 4978 4979 void 4980 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 4981 char *pathname, size_t len) 4982 { 4983 zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE); 4984 } 4985 4986 void 4987 zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 4988 char *pathname, size_t len) 4989 { 4990 zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE); 4991 } 4992 /* 4993 * Wait while the specified activity is in progress in the pool. 4994 */ 4995 int 4996 zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity) 4997 { 4998 boolean_t missing; 4999 5000 int error = zpool_wait_status(zhp, activity, &missing, NULL); 5001 5002 if (missing) { 5003 (void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT, 5004 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"), 5005 zhp->zpool_name); 5006 return (ENOENT); 5007 } else { 5008 return (error); 5009 } 5010 } 5011 5012 /* 5013 * Wait for the given activity and return the status of the wait (whether or not 5014 * any waiting was done) in the 'waited' parameter. Non-existent pools are 5015 * reported via the 'missing' parameter, rather than by printing an error 5016 * message. 
This is convenient when this function is called in a loop over a 5017 * long period of time (as it is, for example, by zpool's wait command). In that 5018 * scenario, a pool being exported or destroyed should be considered a normal 5019 * event, so we don't want to print an error when we find that the pool doesn't 5020 * exist. 5021 */ 5022 int 5023 zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity, 5024 boolean_t *missing, boolean_t *waited) 5025 { 5026 int error = lzc_wait(zhp->zpool_name, activity, waited); 5027 *missing = (error == ENOENT); 5028 if (*missing) 5029 return (0); 5030 5031 if (error != 0) { 5032 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error, 5033 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"), 5034 zhp->zpool_name); 5035 } 5036 5037 return (error); 5038 } 5039 5040 int 5041 zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap) 5042 { 5043 int error = lzc_set_bootenv(zhp->zpool_name, envmap); 5044 if (error != 0) { 5045 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error, 5046 dgettext(TEXT_DOMAIN, 5047 "error setting bootenv in pool '%s'"), zhp->zpool_name); 5048 } 5049 5050 return (error); 5051 } 5052 5053 int 5054 zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp) 5055 { 5056 nvlist_t *nvl; 5057 int error; 5058 5059 nvl = NULL; 5060 error = lzc_get_bootenv(zhp->zpool_name, &nvl); 5061 if (error != 0) { 5062 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error, 5063 dgettext(TEXT_DOMAIN, 5064 "error getting bootenv in pool '%s'"), zhp->zpool_name); 5065 } else { 5066 *nvlp = nvl; 5067 } 5068 5069 return (error); 5070 } 5071 5072 /* 5073 * Attempt to read and parse feature file(s) (from "compatibility" property). 5074 * Files contain zpool feature names, comma or whitespace-separated. 5075 * Comments (# character to next newline) are discarded. 5076 * 5077 * Arguments: 5078 * compatibility : string containing feature filenames 5079 * features : either NULL or pointer to array of boolean 5080 * report : either NULL or pointer to string buffer 5081 * rlen : length of "report" buffer 5082 * 5083 * compatibility is NULL (unset), "", "off", "legacy", or a list of 5084 * comma-separated filenames. Filenames should either be absolute, 5085 * or relative to: 5086 * 1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or 5087 * 2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d). 5088 * (Unset), "" or "off" => enable all features 5089 * "legacy" => disable all features 5090 * 5091 * Any feature names read from files which match unames in spa_feature_table 5092 * will have the corresponding boolean set in the features array (if non-NULL). 5093 * If more than one feature set is specified, only features present in *all* of 5094 * them will be set. 5095 * 5096 * "report" if not NULL will be populated with a suitable status message.
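 *
 * An illustrative feature file (the feature names are real; the
 * filename is hypothetical), e.g. /etc/zfs/compatibility.d/local:
 *
 *	# features enabled on all of my hosts
 *	async_destroy, bookmarks
 *	embedded_data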
5097 * 5098 * Return values: 5099 * ZPOOL_COMPATIBILITY_OK : files read and parsed ok 5100 * ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file 5101 * ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name 5102 * ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name 5103 * ZPOOL_COMPATIBILITY_NOFILES : no feature files found 5104 */ 5105 zpool_compat_status_t 5106 zpool_load_compat(const char *compat, boolean_t *features, char *report, 5107 size_t rlen) 5108 { 5109 int sdirfd, ddirfd, featfd; 5110 struct stat fs; 5111 char *fc; 5112 char *ps, *ls, *ws; 5113 char *file, *line, *word; 5114 5115 char l_compat[ZFS_MAXPROPLEN]; 5116 5117 boolean_t ret_nofiles = B_TRUE; 5118 boolean_t ret_badfile = B_FALSE; 5119 boolean_t ret_badtoken = B_FALSE; 5120 boolean_t ret_warntoken = B_FALSE; 5121 5122 /* special cases (unset), "" and "off" => enable all features */ 5123 if (compat == NULL || compat[0] == '\0' || 5124 strcmp(compat, ZPOOL_COMPAT_OFF) == 0) { 5125 if (features != NULL) 5126 for (uint_t i = 0; i < SPA_FEATURES; i++) 5127 features[i] = B_TRUE; 5128 if (report != NULL) 5129 strlcpy(report, gettext("all features enabled"), rlen); 5130 return (ZPOOL_COMPATIBILITY_OK); 5131 } 5132 5133 /* Final special case "legacy" => disable all features */ 5134 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) { 5135 if (features != NULL) 5136 for (uint_t i = 0; i < SPA_FEATURES; i++) 5137 features[i] = B_FALSE; 5138 if (report != NULL) 5139 strlcpy(report, gettext("all features disabled"), rlen); 5140 return (ZPOOL_COMPATIBILITY_OK); 5141 } 5142 5143 /* 5144 * Start with all true; will be ANDed with results from each file 5145 */ 5146 if (features != NULL) 5147 for (uint_t i = 0; i < SPA_FEATURES; i++) 5148 features[i] = B_TRUE; 5149 5150 char err_badfile[ZFS_MAXPROPLEN] = ""; 5151 char err_badtoken[ZFS_MAXPROPLEN] = ""; 5152 5153 /* 5154 * We ignore errors from the directory open() 5155 * as they're only needed if the filename is relative 5156 * which will be checked during the openat(). 5157 */ 5158 5159 /* O_PATH safer than O_RDONLY if system allows it */ 5160 #if defined(O_PATH) 5161 #define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH) 5162 #else 5163 #define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY) 5164 #endif 5165 5166 sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS); 5167 ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS); 5168 5169 (void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN); 5170 5171 for (file = strtok_r(l_compat, ",", &ps); 5172 file != NULL; 5173 file = strtok_r(NULL, ",", &ps)) { 5174 5175 boolean_t l_features[SPA_FEATURES]; 5176 5177 enum { Z_SYSCONF, Z_DATA } source; 5178 5179 /* try sysconfdir first, then datadir */ 5180 source = Z_SYSCONF; 5181 if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) { 5182 featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC); 5183 source = Z_DATA; 5184 } 5185 5186 /* File readable and correct size? 
*/ 5187 if (featfd < 0 || 5188 fstat(featfd, &fs) < 0 || 5189 fs.st_size < 1 || 5190 fs.st_size > ZPOOL_COMPAT_MAXSIZE) { 5191 (void) close(featfd); 5192 strlcat(err_badfile, file, ZFS_MAXPROPLEN); 5193 strlcat(err_badfile, " ", ZFS_MAXPROPLEN); 5194 ret_badfile = B_TRUE; 5195 continue; 5196 } 5197 5198 /* Prefault the file if system allows */ 5199 #if defined(MAP_POPULATE) 5200 #define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE) 5201 #elif defined(MAP_PREFAULT_READ) 5202 #define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ) 5203 #else 5204 #define ZC_MMAP_FLAGS (MAP_PRIVATE) 5205 #endif 5206 5207 /* private mmap() so we can strtok safely */ 5208 fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE, 5209 ZC_MMAP_FLAGS, featfd, 0); 5210 (void) close(featfd); 5211 5212 /* map ok, and last character == newline? */ 5213 if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') { 5214 (void) munmap((void *) fc, fs.st_size); 5215 strlcat(err_badfile, file, ZFS_MAXPROPLEN); 5216 strlcat(err_badfile, " ", ZFS_MAXPROPLEN); 5217 ret_badfile = B_TRUE; 5218 continue; 5219 } 5220 5221 ret_nofiles = B_FALSE; 5222 5223 for (uint_t i = 0; i < SPA_FEATURES; i++) 5224 l_features[i] = B_FALSE; 5225 5226 /* replace final newline with NULL to ensure string ends */ 5227 fc[fs.st_size - 1] = '\0'; 5228 5229 for (line = strtok_r(fc, "\n", &ls); 5230 line != NULL; 5231 line = strtok_r(NULL, "\n", &ls)) { 5232 /* discard comments */ 5233 char *r = strchr(line, '#'); 5234 if (r != NULL) 5235 *r = '\0'; 5236 5237 for (word = strtok_r(line, ", \t", &ws); 5238 word != NULL; 5239 word = strtok_r(NULL, ", \t", &ws)) { 5240 /* Find matching feature name */ 5241 uint_t f; 5242 for (f = 0; f < SPA_FEATURES; f++) { 5243 zfeature_info_t *fi = 5244 &spa_feature_table[f]; 5245 if (strcmp(word, fi->fi_uname) == 0) { 5246 l_features[f] = B_TRUE; 5247 break; 5248 } 5249 } 5250 if (f < SPA_FEATURES) 5251 continue; 5252 5253 /* found an unrecognized word */ 5254 /* lightly sanitize it */ 5255 if (strlen(word) > 32) 5256 word[32] = '\0'; 5257 for (char *c = word; *c != '\0'; c++) 5258 if (!isprint(*c)) 5259 *c = '?'; 5260 5261 strlcat(err_badtoken, word, ZFS_MAXPROPLEN); 5262 strlcat(err_badtoken, " ", ZFS_MAXPROPLEN); 5263 if (source == Z_SYSCONF) 5264 ret_badtoken = B_TRUE; 5265 else 5266 ret_warntoken = B_TRUE; 5267 } 5268 } 5269 (void) munmap((void *) fc, fs.st_size); 5270 5271 if (features != NULL) 5272 for (uint_t i = 0; i < SPA_FEATURES; i++) 5273 features[i] &= l_features[i]; 5274 } 5275 (void) close(sdirfd); 5276 (void) close(ddirfd); 5277 5278 /* Return the most serious error */ 5279 if (ret_badfile) { 5280 if (report != NULL) 5281 snprintf(report, rlen, gettext("could not read/" 5282 "parse feature file(s): %s"), err_badfile); 5283 return (ZPOOL_COMPATIBILITY_BADFILE); 5284 } 5285 if (ret_nofiles) { 5286 if (report != NULL) 5287 strlcpy(report, 5288 gettext("no valid compatibility files specified"), 5289 rlen); 5290 return (ZPOOL_COMPATIBILITY_NOFILES); 5291 } 5292 if (ret_badtoken) { 5293 if (report != NULL) 5294 snprintf(report, rlen, gettext("invalid feature " 5295 "name(s) in local compatibility files: %s"), 5296 err_badtoken); 5297 return (ZPOOL_COMPATIBILITY_BADTOKEN); 5298 } 5299 if (ret_warntoken) { 5300 if (report != NULL) 5301 snprintf(report, rlen, gettext("unrecognized feature " 5302 "name(s) in distribution compatibility files: %s"), 5303 err_badtoken); 5304 return (ZPOOL_COMPATIBILITY_WARNTOKEN); 5305 } 5306 if (report != NULL) 5307 strlcpy(report, gettext("compatibility set ok"), rlen); 5308 return 
(ZPOOL_COMPATIBILITY_OK); 5309 } 5310 5311 static int 5312 zpool_vdev_guid(zpool_handle_t *zhp, const char *vdevname, uint64_t *vdev_guid) 5313 { 5314 nvlist_t *tgt; 5315 boolean_t avail_spare, l2cache; 5316 5317 verify(zhp != NULL); 5318 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 5319 char errbuf[ERRBUFLEN]; 5320 (void) snprintf(errbuf, sizeof (errbuf), 5321 dgettext(TEXT_DOMAIN, "pool is in an unavailable state")); 5322 return (zfs_error(zhp->zpool_hdl, EZFS_POOLUNAVAIL, errbuf)); 5323 } 5324 5325 if ((tgt = zpool_find_vdev(zhp, vdevname, &avail_spare, &l2cache, 5326 NULL)) == NULL) { 5327 char errbuf[ERRBUFLEN]; 5328 (void) snprintf(errbuf, sizeof (errbuf), 5329 dgettext(TEXT_DOMAIN, "can not find %s in %s"), 5330 vdevname, zhp->zpool_name); 5331 return (zfs_error(zhp->zpool_hdl, EZFS_NODEVICE, errbuf)); 5332 } 5333 5334 *vdev_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 5335 return (0); 5336 } 5337 5338 /* 5339 * Get a vdev property value for 'prop' and return the value in 5340 * a pre-allocated buffer. 5341 */ 5342 int 5343 zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name, 5344 char *buf, size_t len, zprop_source_t *srctype, boolean_t literal) 5345 { 5346 nvlist_t *nv; 5347 const char *strval; 5348 uint64_t intval; 5349 zprop_source_t src = ZPROP_SRC_NONE; 5350 5351 if (prop == VDEV_PROP_USERPROP) { 5352 /* user property, prop_name must contain the property name */ 5353 assert(prop_name != NULL); 5354 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) { 5355 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE); 5356 strval = fnvlist_lookup_string(nv, ZPROP_VALUE); 5357 } else { 5358 /* user prop not found */ 5359 src = ZPROP_SRC_DEFAULT; 5360 strval = "-"; 5361 } 5362 (void) strlcpy(buf, strval, len); 5363 if (srctype) 5364 *srctype = src; 5365 return (0); 5366 } 5367 5368 if (prop_name == NULL) 5369 prop_name = (char *)vdev_prop_to_name(prop); 5370 5371 switch (vdev_prop_get_type(prop)) { 5372 case PROP_TYPE_STRING: 5373 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) { 5374 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE); 5375 strval = fnvlist_lookup_string(nv, ZPROP_VALUE); 5376 } else { 5377 src = ZPROP_SRC_DEFAULT; 5378 if ((strval = vdev_prop_default_string(prop)) == NULL) 5379 strval = "-"; 5380 } 5381 (void) strlcpy(buf, strval, len); 5382 break; 5383 5384 case PROP_TYPE_NUMBER: 5385 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) { 5386 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE); 5387 intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE); 5388 } else { 5389 src = ZPROP_SRC_DEFAULT; 5390 intval = vdev_prop_default_numeric(prop); 5391 } 5392 5393 switch (prop) { 5394 case VDEV_PROP_ASIZE: 5395 case VDEV_PROP_PSIZE: 5396 case VDEV_PROP_SIZE: 5397 case VDEV_PROP_BOOTSIZE: 5398 case VDEV_PROP_ALLOCATED: 5399 case VDEV_PROP_FREE: 5400 case VDEV_PROP_READ_ERRORS: 5401 case VDEV_PROP_WRITE_ERRORS: 5402 case VDEV_PROP_CHECKSUM_ERRORS: 5403 case VDEV_PROP_INITIALIZE_ERRORS: 5404 case VDEV_PROP_TRIM_ERRORS: 5405 case VDEV_PROP_SLOW_IOS: 5406 case VDEV_PROP_OPS_NULL: 5407 case VDEV_PROP_OPS_READ: 5408 case VDEV_PROP_OPS_WRITE: 5409 case VDEV_PROP_OPS_FREE: 5410 case VDEV_PROP_OPS_CLAIM: 5411 case VDEV_PROP_OPS_TRIM: 5412 case VDEV_PROP_BYTES_NULL: 5413 case VDEV_PROP_BYTES_READ: 5414 case VDEV_PROP_BYTES_WRITE: 5415 case VDEV_PROP_BYTES_FREE: 5416 case VDEV_PROP_BYTES_CLAIM: 5417 case VDEV_PROP_BYTES_TRIM: 5418 if (literal) { 5419 (void) snprintf(buf, len, "%llu", 5420 (u_longlong_t)intval); 5421 } else { 5422 (void) zfs_nicenum(intval, 
buf, len); 5423 } 5424 break; 5425 case VDEV_PROP_EXPANDSZ: 5426 if (intval == 0) { 5427 (void) strlcpy(buf, "-", len); 5428 } else if (literal) { 5429 (void) snprintf(buf, len, "%llu", 5430 (u_longlong_t)intval); 5431 } else { 5432 (void) zfs_nicenum(intval, buf, len); 5433 } 5434 break; 5435 case VDEV_PROP_CAPACITY: 5436 if (literal) { 5437 (void) snprintf(buf, len, "%llu", 5438 (u_longlong_t)intval); 5439 } else { 5440 (void) snprintf(buf, len, "%llu%%", 5441 (u_longlong_t)intval); 5442 } 5443 break; 5444 case VDEV_PROP_CHECKSUM_N: 5445 case VDEV_PROP_CHECKSUM_T: 5446 case VDEV_PROP_IO_N: 5447 case VDEV_PROP_IO_T: 5448 case VDEV_PROP_SLOW_IO_N: 5449 case VDEV_PROP_SLOW_IO_T: 5450 if (intval == UINT64_MAX) { 5451 (void) strlcpy(buf, "-", len); 5452 } else { 5453 (void) snprintf(buf, len, "%llu", 5454 (u_longlong_t)intval); 5455 } 5456 break; 5457 case VDEV_PROP_FRAGMENTATION: 5458 if (intval == UINT64_MAX) { 5459 (void) strlcpy(buf, "-", len); 5460 } else { 5461 (void) snprintf(buf, len, "%llu%%", 5462 (u_longlong_t)intval); 5463 } 5464 break; 5465 case VDEV_PROP_STATE: 5466 if (literal) { 5467 (void) snprintf(buf, len, "%llu", 5468 (u_longlong_t)intval); 5469 } else { 5470 (void) strlcpy(buf, zpool_state_to_name(intval, 5471 VDEV_AUX_NONE), len); 5472 } 5473 break; 5474 default: 5475 (void) snprintf(buf, len, "%llu", 5476 (u_longlong_t)intval); 5477 } 5478 break; 5479 5480 case PROP_TYPE_INDEX: 5481 if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) { 5482 src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE); 5483 intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE); 5484 } else { 5485 /* 'trim_support' only valid for leaf vdevs */ 5486 if (prop == VDEV_PROP_TRIM_SUPPORT) { 5487 (void) strlcpy(buf, "-", len); 5488 break; 5489 } 5490 src = ZPROP_SRC_DEFAULT; 5491 intval = vdev_prop_default_numeric(prop); 5492 /* Only use if provided by the RAIDZ VDEV above */ 5493 if (prop == VDEV_PROP_RAIDZ_EXPANDING) 5494 return (ENOENT); 5495 } 5496 if (vdev_prop_index_to_string(prop, intval, 5497 (const char **)&strval) != 0) 5498 return (-1); 5499 (void) strlcpy(buf, strval, len); 5500 break; 5501 5502 default: 5503 abort(); 5504 } 5505 5506 if (srctype) 5507 *srctype = src; 5508 5509 return (0); 5510 } 5511 5512 /* 5513 * Get a vdev property value for 'prop_name' and return the value in 5514 * a pre-allocated buffer. 
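 *
 * Illustrative call (a sketch):
 *
 *	char val[ZFS_MAXPROPLEN];
 *	if (zpool_get_vdev_prop(zhp, "sda", VDEV_PROP_CAPACITY, NULL,
 *	    val, sizeof (val), NULL, B_FALSE) == 0)
 *		(void) printf("capacity: %s\n", val);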
5515 */ 5516 int 5517 zpool_get_vdev_prop(zpool_handle_t *zhp, const char *vdevname, vdev_prop_t prop, 5518 char *prop_name, char *buf, size_t len, zprop_source_t *srctype, 5519 boolean_t literal) 5520 { 5521 nvlist_t *reqnvl, *reqprops; 5522 nvlist_t *retprops = NULL; 5523 uint64_t vdev_guid = 0; 5524 int ret; 5525 5526 if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0) 5527 return (ret); 5528 5529 if (nvlist_alloc(&reqnvl, NV_UNIQUE_NAME, 0) != 0) 5530 return (no_memory(zhp->zpool_hdl)); 5531 if (nvlist_alloc(&reqprops, NV_UNIQUE_NAME, 0) != 0) { 5532 nvlist_free(reqnvl); return (no_memory(zhp->zpool_hdl)); } 5533 5534 fnvlist_add_uint64(reqnvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid); 5535 5536 if (prop != VDEV_PROP_USERPROP) { 5537 /* prop_name overrides prop value */ 5538 if (prop_name != NULL) 5539 prop = vdev_name_to_prop(prop_name); 5540 else 5541 prop_name = (char *)vdev_prop_to_name(prop); 5542 assert(prop < VDEV_NUM_PROPS); 5543 } 5544 5545 assert(prop_name != NULL); 5546 if (nvlist_add_uint64(reqprops, prop_name, prop) != 0) { 5547 nvlist_free(reqnvl); 5548 nvlist_free(reqprops); 5549 return (no_memory(zhp->zpool_hdl)); 5550 } 5551 5552 fnvlist_add_nvlist(reqnvl, ZPOOL_VDEV_PROPS_GET_PROPS, reqprops); 5553 5554 ret = lzc_get_vdev_prop(zhp->zpool_name, reqnvl, &retprops); 5555 5556 if (ret == 0) { 5557 ret = zpool_get_vdev_prop_value(retprops, prop, prop_name, buf, 5558 len, srctype, literal); 5559 } else { 5560 char errbuf[ERRBUFLEN]; 5561 (void) snprintf(errbuf, sizeof (errbuf), 5562 dgettext(TEXT_DOMAIN, "cannot get vdev property %s from" 5563 " %s in %s"), prop_name, vdevname, zhp->zpool_name); 5564 (void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf); 5565 } 5566 5567 nvlist_free(reqnvl); 5568 nvlist_free(reqprops); 5569 nvlist_free(retprops); 5570 5571 return (ret); 5572 } 5573 5574 /* 5575 * Get all vdev properties 5576 */ 5577 int 5578 zpool_get_all_vdev_props(zpool_handle_t *zhp, const char *vdevname, 5579 nvlist_t **outnvl) 5580 { 5581 nvlist_t *nvl = NULL; 5582 uint64_t vdev_guid = 0; 5583 int ret; 5584 5585 if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0) 5586 return (ret); 5587 5588 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 5589 return (no_memory(zhp->zpool_hdl)); 5590 5591 fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid); 5592 5593 ret = lzc_get_vdev_prop(zhp->zpool_name, nvl, outnvl); 5594 5595 nvlist_free(nvl); 5596 5597 if (ret) { 5598 char errbuf[ERRBUFLEN]; 5599 (void) snprintf(errbuf, sizeof (errbuf), 5600 dgettext(TEXT_DOMAIN, "cannot get vdev properties for" 5601 " %s in %s"), vdevname, zhp->zpool_name); 5602 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 5603 } 5604 5605 return (ret); 5606 } 5607 5608 /* 5609 * Set vdev property 5610 */ 5611 int 5612 zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname, 5613 const char *propname, const char *propval) 5614 { 5615 int ret; 5616 nvlist_t *nvl = NULL; 5617 nvlist_t *outnvl = NULL; 5618 nvlist_t *props; 5619 nvlist_t *realprops; 5620 prop_flags_t flags = { 0 }; 5621 uint64_t version; 5622 uint64_t vdev_guid; 5623 5624 if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0) 5625 return (ret); 5626 5627 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 5628 return (no_memory(zhp->zpool_hdl)); 5629 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 5630 nvlist_free(nvl); return (no_memory(zhp->zpool_hdl)); } 5631 5632 fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid); 5633 5634 if (nvlist_add_string(props, propname, propval) != 0) { 5635 nvlist_free(props); nvlist_free(nvl); 5636 return
(no_memory(zhp->zpool_hdl)); 5637 } 5638 5639 char errbuf[ERRBUFLEN]; 5640 (void) snprintf(errbuf, sizeof (errbuf), 5641 dgettext(TEXT_DOMAIN, "cannot set property %s for %s on %s"), 5642 propname, vdevname, zhp->zpool_name); 5643 5644 flags.vdevprop = 1; 5645 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 5646 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 5647 zhp->zpool_name, props, version, flags, errbuf)) == NULL) { 5648 nvlist_free(props); 5649 nvlist_free(nvl); 5650 return (-1); 5651 } 5652 5653 nvlist_free(props); 5654 props = realprops; 5655 5656 fnvlist_add_nvlist(nvl, ZPOOL_VDEV_PROPS_SET_PROPS, props); 5657 5658 ret = lzc_set_vdev_prop(zhp->zpool_name, nvl, &outnvl); 5659 5660 nvlist_free(props); 5661 nvlist_free(nvl); 5662 nvlist_free(outnvl); 5663 5664 if (ret) 5665 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 5666 5667 return (ret); 5668 } 5669 5670 /* 5671 * Prune older entries from the DDT to reclaim space under the quota 5672 */ 5673 int 5674 zpool_ddt_prune(zpool_handle_t *zhp, zpool_ddt_prune_unit_t unit, 5675 uint64_t amount) 5676 { 5677 int error = lzc_ddt_prune(zhp->zpool_name, unit, amount); 5678 if (error != 0) { 5679 libzfs_handle_t *hdl = zhp->zpool_hdl; 5680 char errbuf[ERRBUFLEN]; 5681 5682 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 5683 "cannot prune dedup table on '%s'"), zhp->zpool_name); 5684 5685 if (error == EALREADY) { 5686 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 5687 "a prune operation is already in progress")); 5688 (void) zfs_error(hdl, EZFS_BUSY, errbuf); 5689 } else { 5690 (void) zpool_standard_error(hdl, errno, errbuf); 5691 } 5692 return (-1); 5693 } 5694 5695 return (0); 5696 } 5697
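/*
 * Illustrative call (a sketch): prune the oldest 10% of prunable
 * (single-reference) dedup table entries:
 *
 *	(void) zpool_ddt_prune(zhp, ZPOOL_DDT_PRUNE_PERCENTAGE, 10);
 */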