1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 25 * Copyright (c) 2011, 2020 by Delphix. All rights reserved. 26 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com> 27 * Copyright (c) 2018 Datto Inc. 28 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved. 29 * Copyright (c) 2017, Intel Corporation. 
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 */

#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_sysfs.h>
#include <sys/vdev_disk.h>
#include <sys/types.h>
#include <dlfcn.h>
#include <libzutil.h>
#include <fcntl.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static boolean_t zpool_vdev_is_interior(const char *name);

/* Context flags controlling when a pool property may be validated/set. */
typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 *	zpool property functions
 * ====================================================================
 */

/*
 * Fetch all pool properties from the kernel into zhp->zpool_props.
 * Returns 0 on success, -1 on failure (ioctl or allocation error).
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	/* Grow the destination buffer until the props nvlist fits. */
	while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

/*
 * Re-read the cached pool properties.  The old nvlist is freed only
 * after a new one has been fetched successfully, so on failure the
 * handle keeps its previous (stale but valid) properties.
 */
int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

/*
 * Return the string value of 'prop' from the cached props nvlist,
 * falling back to the property's default (or "-" when there is no
 * default).  The value's source is returned through 'src' if non-NULL.
 */
static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Return the numeric value of 'prop', falling back to its default when
 * not present in the cached props.  The value's source is returned
 * through 'src' if non-NULL.
 */
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		/*
		 * "Can't open" is refined by the aux code: corrupt data or
		 * a bad log device is FAULTED, a split pool is SPLIT, and
		 * anything else is reported as UNAVAIL.
		 */
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
 * "SUSPENDED", etc).
 */
const char *
zpool_get_state_str(zpool_handle_t *zhp)
{
	zpool_errata_t errata;
	zpool_status_t status;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	const char *str;

	status = zpool_get_status(zhp, NULL, &errata);

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		str = gettext("FAULTED");
	} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
	    status == ZPOOL_STATUS_IO_FAILURE_MMP) {
		str = gettext("SUSPENDED");
	} else {
		/* Otherwise report the root vdev's own state. */
		verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		verify(nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
		    == 0);
		str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
	}
	return (str);
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;

	/*
	 * For an unavailable pool only a small subset of properties can be
	 * answered; everything else is reported as "-".
	 */
	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
		case ZPOOL_PROP_COMPATIBILITY:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;

		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_CHECKPOINT:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			/* UINT64_MAX means fragmentation is not available. */
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			/* Stored as a fixed-point ratio scaled by 100. */
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;
		case ZPOOL_PROP_VERSION:
			/* Feature-flag pools have no meaningful version. */
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

/*
 * Check if the bootfs name has the same pool name as it is set to.
 * Assuming bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, const char *bootfs)
{
	int len = strlen(pool);
	if (bootfs[0] == '\0')
		return (B_TRUE);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	/* bootfs must be the pool itself or a dataset inside it. */
	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	char badword[ZFS_MAXPROPLEN];
	char badfile[MAXPATHLEN];

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		/* "feature@..." names are handled separately from real props */
		if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "feature '%s' unsupported by kernel"),
				    fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled' or 'disabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			/*
			 * Features may only be explicitly disabled at pool
			 * creation time; afterwards they can only be enabled.
			 */
			if (!flags.create &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'disabled' at creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPOOL_PROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (!flags.create && zpool_prop_setonce(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s' can only be set at "
			    "creation time"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
561 */ 562 switch (prop) { 563 case ZPOOL_PROP_VERSION: 564 if (intval < version || 565 !SPA_VERSION_IS_SUPPORTED(intval)) { 566 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 567 "property '%s' number %d is invalid."), 568 propname, intval); 569 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 570 goto error; 571 } 572 break; 573 574 case ZPOOL_PROP_ASHIFT: 575 if (intval != 0 && 576 (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) { 577 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 578 "property '%s' number %d is invalid, only " 579 "values between %" PRId32 " and " 580 "%" PRId32 " are allowed."), 581 propname, intval, ASHIFT_MIN, ASHIFT_MAX); 582 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 583 goto error; 584 } 585 break; 586 587 case ZPOOL_PROP_BOOTFS: 588 if (flags.create || flags.import) { 589 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 590 "property '%s' cannot be set at creation " 591 "or import time"), propname); 592 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 593 goto error; 594 } 595 596 if (version < SPA_VERSION_BOOTFS) { 597 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 598 "pool must be upgraded to support " 599 "'%s' property"), propname); 600 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 601 goto error; 602 } 603 604 /* 605 * bootfs property value has to be a dataset name and 606 * the dataset has to be in the same pool as it sets to. 
 */
			if (!bootfs_name_valid(poolname, strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			/* Empty string and "none" are both accepted as-is. */
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			/*
			 * Temporarily truncate at the last slash to stat the
			 * parent directory; the slash is restored below.
			 */
			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMPATIBILITY:
			/* Verify the compatibility file(s) can be loaded. */
			switch (zpool_load_compat(strval, NULL,
			    badword, badfile)) {
			case ZPOOL_COMPATIBILITY_OK:
				break;
			case ZPOOL_COMPATIBILITY_READERR:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "error reading feature file '%s'"),
				    badfile);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			case ZPOOL_COMPATIBILITY_BADFILE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "feature file '%s' too large or not "
				    "newline-terminated"),
				    badfile);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			case ZPOOL_COMPATIBILITY_BADWORD:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "unknown feature '%s' in feature "
				    "file '%s'"),
				    badword, badfile);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			case ZPOOL_COMPATIBILITY_NOFILES:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "no feature files specified"));
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_MULTIHOST:
			if (get_system_hostid() == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "requires a non-zero system hostid"));
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_DEDUPDITTO:
			/* Accepted for compatibility but now a no-op. */
			printf("Note: property '%s' no longer has "
			    "any effect\n", propname);
			break;

		default:
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	/* Validate and parse the property before handing it to the kernel. */
	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	/* On success re-read the props so the cached copy stays current. */
	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}

/*
 * Expand 'plp' into a full property list for this pool, appending entries
 * for every feature@ property (on the first expansion) and for any
 * unsupported@ features present on the pool, then compute column widths.
 */
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
    boolean_t literal)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		/* Append one entry per known feature flag. */
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
880 */ 881 found = B_FALSE; 882 entry = *plp; 883 while (entry != NULL) { 884 if (entry->pl_user_prop != NULL && 885 strcmp(propname, entry->pl_user_prop) == 0) { 886 found = B_TRUE; 887 break; 888 } 889 entry = entry->pl_next; 890 } 891 if (found) { 892 free(propname); 893 continue; 894 } 895 896 entry = zfs_alloc(hdl, sizeof (zprop_list_t)); 897 entry->pl_prop = ZPROP_INVAL; 898 entry->pl_user_prop = propname; 899 entry->pl_width = strlen(entry->pl_user_prop); 900 entry->pl_all = B_TRUE; 901 902 *last = entry; 903 last = &entry->pl_next; 904 } 905 906 for (entry = *plp; entry != NULL; entry = entry->pl_next) { 907 if (entry->pl_fixed && !literal) 908 continue; 909 910 if (entry->pl_prop != ZPROP_INVAL && 911 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), 912 NULL, literal) == 0) { 913 if (strlen(buf) > entry->pl_width) 914 entry->pl_width = strlen(buf); 915 } 916 } 917 918 return (0); 919 } 920 921 /* 922 * Get the state for the given feature on the given ZFS pool. 923 */ 924 int 925 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf, 926 size_t len) 927 { 928 uint64_t refcount; 929 boolean_t found = B_FALSE; 930 nvlist_t *features = zpool_get_features(zhp); 931 boolean_t supported; 932 const char *feature = strchr(propname, '@') + 1; 933 934 supported = zpool_prop_feature(propname); 935 ASSERT(supported || zpool_prop_unsupported(propname)); 936 937 /* 938 * Convert from feature name to feature guid. This conversion is 939 * unnecessary for unsupported@... properties because they already 940 * use guids. 
941 */ 942 if (supported) { 943 int ret; 944 spa_feature_t fid; 945 946 ret = zfeature_lookup_name(feature, &fid); 947 if (ret != 0) { 948 (void) strlcpy(buf, "-", len); 949 return (ENOTSUP); 950 } 951 feature = spa_feature_table[fid].fi_guid; 952 } 953 954 if (nvlist_lookup_uint64(features, feature, &refcount) == 0) 955 found = B_TRUE; 956 957 if (supported) { 958 if (!found) { 959 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len); 960 } else { 961 if (refcount == 0) 962 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len); 963 else 964 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len); 965 } 966 } else { 967 if (found) { 968 if (refcount == 0) { 969 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE); 970 } else { 971 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY); 972 } 973 } else { 974 (void) strlcpy(buf, "-", len); 975 return (ENOTSUP); 976 } 977 } 978 979 return (0); 980 } 981 982 /* 983 * Validate the given pool name, optionally putting an extended error message in 984 * 'buf'. 985 */ 986 boolean_t 987 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 988 { 989 namecheck_err_t why; 990 char what; 991 int ret; 992 993 ret = pool_namecheck(pool, &why, &what); 994 995 /* 996 * The rules for reserved pool names were extended at a later point. 997 * But we need to support users with existing pools that may now be 998 * invalid. So we only check for this expanded set of names during a 999 * create (or import), and only in userland. 
 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "draid", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			/* Translate the namecheck failure into a message. */
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool,
 * even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	/* A missing pool is not an error here: report *ret = NULL instead. */
	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Check if vdev list contains a special vdev
 */
static boolean_t
zpool_has_special_vdev(nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (uint_t c = 0; c < children; c++) {
			char *bias;

			if (nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
			    strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
				return (B_TRUE);
			}
		}
	}
	return (B_FALSE);
}

/*
 * Check if vdev list contains a dRAID vdev
 */
static boolean_t
zpool_has_draid_vdev(nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (uint_t c = 0; c < children; c++) {
1243 char *type; 1244 1245 if (nvlist_lookup_string(child[c], 1246 ZPOOL_CONFIG_TYPE, &type) == 0 && 1247 strcmp(type, VDEV_TYPE_DRAID) == 0) { 1248 return (B_TRUE); 1249 } 1250 } 1251 } 1252 return (B_FALSE); 1253 } 1254 1255 /* 1256 * Output a dRAID top-level vdev name in to the provided buffer. 1257 */ 1258 static char * 1259 zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity, 1260 uint64_t spares, uint64_t children) 1261 { 1262 snprintf(name, len, "%s%llu:%llud:%lluc:%llus", 1263 VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data, 1264 (u_longlong_t)children, (u_longlong_t)spares); 1265 1266 return (name); 1267 } 1268 1269 /* 1270 * Return B_TRUE if the provided name is a dRAID spare name. 1271 */ 1272 boolean_t 1273 zpool_is_draid_spare(const char *name) 1274 { 1275 uint64_t spare_id, parity, vdev_id; 1276 1277 if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu", 1278 (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id, 1279 (u_longlong_t *)&spare_id) == 3) { 1280 return (B_TRUE); 1281 } 1282 1283 return (B_FALSE); 1284 } 1285 1286 /* 1287 * Create the named pool, using the provided vdev list. It is assumed 1288 * that the consumer has already validated the contents of the nvlist, so we 1289 * don't have to worry about error semantics. 
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	nvlist_t *hidden_args = NULL;
	uint8_t *wkeydata = NULL;
	uint_t wkeylen = 0;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	/* Validate any pool properties supplied by the caller. */
	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	/* Validate root-dataset properties and set up encryption if asked. */
	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}

		/* special_small_blocks is meaningless without a special vdev */
		if (nvlist_exists(zc_fsprops,
		    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
		    !zpool_has_special_vdev(nvroot)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "%s property requires a special vdev"),
			    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
			(void) zfs_error(hdl, EZFS_BADPROP, msg);
			goto create_failed;
		}

		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
		    &wkeydata, &wkeylen) != 0) {
			zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
		/* Pass the raw wrapping key to the kernel out-of-band. */
		if (wkeydata != NULL) {
			if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
				goto create_failed;

			if (nvlist_add_uint8_array(hidden_args, "wkeydata",
			    wkeydata, wkeylen) != 0)
				goto create_failed;

			if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
			    hidden_args) != 0)
				goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		/* Free everything up front; each case below returns. */
		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);
		nvlist_free(hidden_args);
		if (wkeydata != NULL)
			free(wkeydata);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.  This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EINVAL:
			/* Distinguish "kernel lacks dRAID" from other EINVALs */
			if (zpool_has_draid_vdev(nvroot) &&
			    zfeature_lookup_name("draid", NULL) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID vdevs are unsupported by the "
				    "kernel"));
				return (zfs_error(hdl, EZFS_BADDEV, msg));
			} else {
				return (zpool_standard_error(hdl, errno, msg));
			}

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/* Success also flows through here to release everything (ret == 0). */
create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	nvlist_free(hidden_args);
	if (wkeydata != NULL)
		free(wkeydata);
	return (ret);
}

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	/*
	 * For an active pool, grab the root dataset handle so its
	 * mountpoint can be cleaned up after the destroy succeeds.
	 */
	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Create a checkpoint in the given pool.  Returns 0 on success, -1 on
 * failure (with the error reported through the libzfs handle).
 */
int
zpool_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	int error;

	error = lzc_pool_checkpoint(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot checkpoint '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
		return (-1);
	}

	return (0);
}

/*
 * Discard the checkpoint from the given pool.
 */
int
zpool_discard_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	int error;

	error = lzc_pool_checkpoint_discard(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot discard checkpoint in '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
		return (-1);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	/* Hot spares require at least SPA_VERSION_SPARES on-disk. */
	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	/* L2ARC cache devices require at least SPA_VERSION_L2CACHE. */
	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EINVAL:

			/* Distinguish "kernel lacks dRAID" from bad config. */
			if (zpool_has_draid_vdev(nvroot) &&
			    zfeature_lookup_name("draid", NULL) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID vdevs are unsupported by the "
				    "kernel"));
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid config; a pool with removing/"
				    "removed vdevs does not support adding "
				    "raidz or dRAID vdevs"));
			}

			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	/* force/hardforce are smuggled through zc_cookie and zc_guid */
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

/* Export the pool; fail if datasets are busy unless 'force' is set. */
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

/* Export the pool unconditionally (force and hardforce both set). */
int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

/*
 * Print a human-readable summary of a (possible) rewind: the txg time the
 * pool would be (or was) returned to, and roughly how much transaction
 * history would be discarded.  Silent if printing is disabled or the
 * config carries no rewind info.
 */
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		/* Report in minutes past two minutes, else in seconds. */
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

/*
 * Explain how a failed import/clear could be recovered by rewinding,
 * including the estimated data loss.  'reason' >= 0 prefixes the text
 * with "action: " (zpool status style); negative means import context.
 */
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem.  "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly.  "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly.  "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain.  "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain.  "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new properties value to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	/*
	 * When an altroot is given, build a property list setting it and
	 * disabling the cachefile, then delegate to zpool_import_props().
	 */
	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}

/*
 * Recursively print an indented tree of vdev names, marking log devices.
 */
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

/*
 * Print the list of unsupported features found in the pool's load info,
 * one per line, with descriptions where available.  The config must
 * contain ZPOOL_CONFIG_LOAD_INFO with ZPOOL_CONFIG_UNSUP_FEAT.
 */
void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameters control whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	zpool_load_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	/* Import under the new name if one was supplied. */
	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	/* Retry the ioctl with a larger destination buffer on ENOMEM. */
	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_load_policy(config, &policy);

	if (error) {
		char desc[1024];
		char aux[256];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EREMOTEIO:
			/* MMP: the pool looks active on another host. */
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
				char *hostname = "<unknown>";
				uint64_t hostid = 0;
				mmp_state_t mmp_state;

				mmp_state = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_MMP_STATE);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTNAME))
					hostname = fnvlist_lookup_string(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTNAME);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTID))
					hostid = fnvlist_lookup_uint64(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTID);

				if (mmp_state == MMP_STATE_ACTIVE) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool is imp"
					    "orted on host '%s' (hostid=%lx).\n"
					    "Export the pool on the other "
					    "system, then run 'zpool import'."),
					    hostname, (unsigned long) hostid);
				} else if (mmp_state == MMP_STATE_NO_HOSTID) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool has "
					    "the multihost property on and "
					    "the\nsystem's hostid is not set. "
					    "Set a unique system hostid with "
					    "the zgenhostid(8) command.\n"));
				}

				/*
				 * NOTE(review): 'aux' is only assigned in the
				 * two mmp_state branches above and is passed
				 * as a format string; if EREMOTEIO can be
				 * returned with another mmp_state, this reads
				 * uninitialized stack -- confirm upstream.
				 */
				(void) zfs_error_aux(hdl, aux);
			}
			(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing or "
				    "corrupted, use '-m' to import the pool "
				    "anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;
		case ENAMETOOLONG:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
			break;
		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zlp_rewind &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}

/*
 * Translate vdev names to guids.  If a vdev_path is determined to be
 * unsuitable then a vd_errlist is allocated and the vdev path and errno
 * are added to it.
 */
static int
zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
    nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
{
	nvlist_t *errlist = NULL;
	int error = 0;

	for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(vds, elem)) {
		boolean_t spare, cache;

		char *vd_path = nvpair_name(elem);
		nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
		    NULL);

		/* Spares and cache devices cannot be targeted here. */
		if ((tgt == NULL) || cache || spare) {
			if (errlist == NULL) {
				errlist = fnvlist_alloc();
				error = EINVAL;
			}

			uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
			    (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
			fnvlist_add_int64(errlist, vd_path, err);
			continue;
		}

		uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
		fnvlist_add_uint64(vdev_guids, vd_path, guid);

		/* Also record the reverse mapping for error reporting. */
		char msg[MAXNAMELEN];
		(void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
		fnvlist_add_string(guids_to_paths, msg, vd_path);
	}

	if (error != 0) {
		verify(errlist != NULL);
		if (vd_errlist != NULL)
			*vd_errlist = errlist;
		else
			fnvlist_free(errlist);
	}

	return (error);
}

/*
 * Map an errno from an initialize operation to the matching EZFS_* code;
 * unknown values are passed through unchanged.
 */
static int
xlate_init_err(int err)
{
	switch (err) {
	case ENODEV:
		return (EZFS_NODEVICE);
	case EINVAL:
	case EROFS:
		return (EZFS_BADDEV);
	case EBUSY:
		return (EZFS_INITIALIZING);
	case ESRCH:
		return (EZFS_NO_INITIALIZE);
	}
	return (err);
}

/*
 * Begin, suspend, or cancel the initialization (initializing of all free
 * blocks) for the given vdevs in the given pool.
 */
static int
zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
    nvlist_t *vds, boolean_t wait)
{
	int err;

	nvlist_t *vdev_guids = fnvlist_alloc();
	nvlist_t *guids_to_paths = fnvlist_alloc();
	nvlist_t *vd_errlist = NULL;
	/*
	 * NOTE(review): errlist is not initialized here and is read below
	 * when lzc_initialize() fails -- presumably lzc_initialize() always
	 * assigns it on failure; confirm against lzc_initialize().
	 */
	nvlist_t *errlist;
	nvpair_t *elem;

	err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
	    guids_to_paths, &vd_errlist);

	if (err != 0) {
		verify(vd_errlist != NULL);
		goto list_errors;
	}

	err = lzc_initialize(zhp->zpool_name, cmd_type,
	    vdev_guids, &errlist);

	if (err != 0) {
		/* Per-vdev failures come back under ZPOOL_INITIALIZE_VDEVS. */
		if (errlist != NULL) {
			vd_errlist = fnvlist_lookup_nvlist(errlist,
			    ZPOOL_INITIALIZE_VDEVS);
			goto list_errors;
		}
		(void) zpool_standard_error(zhp->zpool_hdl, err,
		    dgettext(TEXT_DOMAIN, "operation failed"));
		goto out;
	}

	/* Optionally block until each vdev finishes initializing. */
	if (wait) {
		for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
		    elem = nvlist_next_nvpair(vdev_guids, elem)) {

			uint64_t guid = fnvpair_value_uint64(elem);

			err = lzc_wait_tag(zhp->zpool_name,
			    ZPOOL_WAIT_INITIALIZE, guid, NULL);
			if (err != 0) {
				(void) zpool_standard_error_fmt(zhp->zpool_hdl,
				    err, dgettext(TEXT_DOMAIN, "error "
				    "waiting for '%s' to initialize"),
				    nvpair_name(elem));

				goto out;
			}
		}
	}
	goto out;

list_errors:
	/* Report each failed vdev by path where known, else by guid name. */
	for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(vd_errlist, elem)) {
		int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
		char *path;

		if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
		    &path) != 0)
			path = nvpair_name(elem);

		(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
		    "cannot initialize '%s'", path);
	}

out:
	fnvlist_free(vdev_guids);
	fnvlist_free(guids_to_paths);

	if (vd_errlist != NULL)
		fnvlist_free(vd_errlist);

	return (err == 0 ? 0 : -1);
}

/* Start/suspend/cancel initialization without waiting for completion. */
int
zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
    nvlist_t *vds)
{
	return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
}

/* Same as zpool_initialize(), but block until the operation completes. */
int
zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
    nvlist_t *vds)
{
	return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
}

/*
 * Map an errno from a TRIM operation to the matching EZFS_* code;
 * unknown values are passed through unchanged.
 */
static int
xlate_trim_err(int err)
{
	switch (err) {
	case ENODEV:
		return (EZFS_NODEVICE);
	case EINVAL:
	case EROFS:
		return (EZFS_BADDEV);
	case EBUSY:
		return (EZFS_TRIMMING);
	case ESRCH:
		return (EZFS_NO_TRIM);
	case EOPNOTSUPP:
		return (EZFS_TRIM_NOTSUP);
	}
	return (err);
}

/*
 * Block until every vdev named in vdev_guids has finished trimming,
 * returning the first wait error encountered (0 on success).
 */
static int
zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
{
	int err;
	nvpair_t *elem;

	for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(vdev_guids, elem)) {

		uint64_t guid = fnvpair_value_uint64(elem);

		err = lzc_wait_tag(zhp->zpool_name,
		    ZPOOL_WAIT_TRIM, guid, NULL);
		if (err != 0) {
			(void) zpool_standard_error_fmt(zhp->zpool_hdl,
			    err, dgettext(TEXT_DOMAIN, "error "
			    "waiting to trim '%s'"), nvpair_name(elem));

			return (err);
		}
	}
	return (0);
}

/*
 * Check errlist and report any errors, omitting ones which should be
 * suppressed. Returns B_TRUE if any errors were reported.
2389 */ 2390 static boolean_t 2391 check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags, 2392 nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist) 2393 { 2394 nvpair_t *elem; 2395 boolean_t reported_errs = B_FALSE; 2396 int num_vds = 0; 2397 int num_suppressed_errs = 0; 2398 2399 for (elem = nvlist_next_nvpair(vds, NULL); 2400 elem != NULL; elem = nvlist_next_nvpair(vds, elem)) { 2401 num_vds++; 2402 } 2403 2404 for (elem = nvlist_next_nvpair(errlist, NULL); 2405 elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) { 2406 int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem)); 2407 char *path; 2408 2409 /* 2410 * If only the pool was specified, and it was not a secure 2411 * trim then suppress warnings for individual vdevs which 2412 * do not support trimming. 2413 */ 2414 if (vd_error == EZFS_TRIM_NOTSUP && 2415 trim_flags->fullpool && 2416 !trim_flags->secure) { 2417 num_suppressed_errs++; 2418 continue; 2419 } 2420 2421 reported_errs = B_TRUE; 2422 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem), 2423 &path) != 0) 2424 path = nvpair_name(elem); 2425 2426 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error, 2427 "cannot trim '%s'", path); 2428 } 2429 2430 if (num_suppressed_errs == num_vds) { 2431 (void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 2432 "no devices in pool support trim operations")); 2433 (void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP, 2434 dgettext(TEXT_DOMAIN, "cannot trim"))); 2435 reported_errs = B_TRUE; 2436 } 2437 2438 return (reported_errs); 2439 } 2440 2441 /* 2442 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for 2443 * the given vdevs in the given pool. 
2444 */ 2445 int 2446 zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds, 2447 trimflags_t *trim_flags) 2448 { 2449 int err; 2450 int retval = 0; 2451 2452 nvlist_t *vdev_guids = fnvlist_alloc(); 2453 nvlist_t *guids_to_paths = fnvlist_alloc(); 2454 nvlist_t *errlist = NULL; 2455 2456 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids, 2457 guids_to_paths, &errlist); 2458 if (err != 0) { 2459 check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist); 2460 retval = -1; 2461 goto out; 2462 } 2463 2464 err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate, 2465 trim_flags->secure, vdev_guids, &errlist); 2466 if (err != 0) { 2467 nvlist_t *vd_errlist; 2468 if (errlist != NULL && nvlist_lookup_nvlist(errlist, 2469 ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) { 2470 if (check_trim_errs(zhp, trim_flags, guids_to_paths, 2471 vds, vd_errlist)) { 2472 retval = -1; 2473 goto out; 2474 } 2475 } else { 2476 char msg[1024]; 2477 2478 (void) snprintf(msg, sizeof (msg), 2479 dgettext(TEXT_DOMAIN, "operation failed")); 2480 zpool_standard_error(zhp->zpool_hdl, err, msg); 2481 retval = -1; 2482 goto out; 2483 } 2484 } 2485 2486 2487 if (trim_flags->wait) 2488 retval = zpool_trim_wait(zhp, vdev_guids); 2489 2490 out: 2491 if (errlist != NULL) 2492 fnvlist_free(errlist); 2493 fnvlist_free(vdev_guids); 2494 fnvlist_free(guids_to_paths); 2495 return (retval); 2496 } 2497 2498 /* 2499 * Scan the pool. 
2500 */ 2501 int 2502 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd) 2503 { 2504 zfs_cmd_t zc = {"\0"}; 2505 char msg[1024]; 2506 int err; 2507 libzfs_handle_t *hdl = zhp->zpool_hdl; 2508 2509 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2510 zc.zc_cookie = func; 2511 zc.zc_flags = cmd; 2512 2513 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0) 2514 return (0); 2515 2516 err = errno; 2517 2518 /* ECANCELED on a scrub means we resumed a paused scrub */ 2519 if (err == ECANCELED && func == POOL_SCAN_SCRUB && 2520 cmd == POOL_SCRUB_NORMAL) 2521 return (0); 2522 2523 if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL) 2524 return (0); 2525 2526 if (func == POOL_SCAN_SCRUB) { 2527 if (cmd == POOL_SCRUB_PAUSE) { 2528 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2529 "cannot pause scrubbing %s"), zc.zc_name); 2530 } else { 2531 assert(cmd == POOL_SCRUB_NORMAL); 2532 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2533 "cannot scrub %s"), zc.zc_name); 2534 } 2535 } else if (func == POOL_SCAN_RESILVER) { 2536 assert(cmd == POOL_SCRUB_NORMAL); 2537 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2538 "cannot restart resilver on %s"), zc.zc_name); 2539 } else if (func == POOL_SCAN_NONE) { 2540 (void) snprintf(msg, sizeof (msg), 2541 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 2542 zc.zc_name); 2543 } else { 2544 assert(!"unexpected result"); 2545 } 2546 2547 if (err == EBUSY) { 2548 nvlist_t *nvroot; 2549 pool_scan_stat_t *ps = NULL; 2550 uint_t psc; 2551 2552 verify(nvlist_lookup_nvlist(zhp->zpool_config, 2553 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 2554 (void) nvlist_lookup_uint64_array(nvroot, 2555 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 2556 if (ps && ps->pss_func == POOL_SCAN_SCRUB && 2557 ps->pss_state == DSS_SCANNING) { 2558 if (cmd == POOL_SCRUB_PAUSE) 2559 return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg)); 2560 else 2561 return (zfs_error(hdl, 
EZFS_SCRUBBING, msg)); 2562 } else { 2563 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 2564 } 2565 } else if (err == ENOENT) { 2566 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 2567 } else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) { 2568 return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, msg)); 2569 } else { 2570 return (zpool_standard_error(hdl, err, msg)); 2571 } 2572 } 2573 2574 /* 2575 * Find a vdev that matches the search criteria specified. We use the 2576 * the nvpair name to determine how we should look for the device. 2577 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 2578 * spare; but FALSE if its an INUSE spare. 2579 */ 2580 static nvlist_t * 2581 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare, 2582 boolean_t *l2cache, boolean_t *log) 2583 { 2584 uint_t c, children; 2585 nvlist_t **child; 2586 nvlist_t *ret; 2587 uint64_t is_log; 2588 char *srchkey; 2589 nvpair_t *pair = nvlist_next_nvpair(search, NULL); 2590 2591 /* Nothing to look for */ 2592 if (search == NULL || pair == NULL) 2593 return (NULL); 2594 2595 /* Obtain the key we will use to search */ 2596 srchkey = nvpair_name(pair); 2597 2598 switch (nvpair_type(pair)) { 2599 case DATA_TYPE_UINT64: 2600 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) { 2601 uint64_t srchval, theguid; 2602 2603 verify(nvpair_value_uint64(pair, &srchval) == 0); 2604 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 2605 &theguid) == 0); 2606 if (theguid == srchval) 2607 return (nv); 2608 } 2609 break; 2610 2611 case DATA_TYPE_STRING: { 2612 char *srchval, *val; 2613 2614 verify(nvpair_value_string(pair, &srchval) == 0); 2615 if (nvlist_lookup_string(nv, srchkey, &val) != 0) 2616 break; 2617 2618 /* 2619 * Search for the requested value. Special cases: 2620 * 2621 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in 2622 * "-part1", or "p1". The suffix is hidden from the user, 2623 * but included in the string, so this matches around it. 
2624 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname() 2625 * is used to check all possible expanded paths. 2626 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE). 2627 * 2628 * Otherwise, all other searches are simple string compares. 2629 */ 2630 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) { 2631 uint64_t wholedisk = 0; 2632 2633 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 2634 &wholedisk); 2635 if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0) 2636 return (nv); 2637 2638 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 2639 char *type, *idx, *end, *p; 2640 uint64_t id, vdev_id; 2641 2642 /* 2643 * Determine our vdev type, keeping in mind 2644 * that the srchval is composed of a type and 2645 * vdev id pair (i.e. mirror-4). 2646 */ 2647 if ((type = strdup(srchval)) == NULL) 2648 return (NULL); 2649 2650 if ((p = strrchr(type, '-')) == NULL) { 2651 free(type); 2652 break; 2653 } 2654 idx = p + 1; 2655 *p = '\0'; 2656 2657 /* 2658 * If the types don't match then keep looking. 2659 */ 2660 if (strncmp(val, type, strlen(val)) != 0) { 2661 free(type); 2662 break; 2663 } 2664 2665 verify(zpool_vdev_is_interior(type)); 2666 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 2667 &id) == 0); 2668 2669 errno = 0; 2670 vdev_id = strtoull(idx, &end, 10); 2671 2672 /* 2673 * If we are looking for a raidz and a parity is 2674 * specified, make sure it matches. 
2675 */ 2676 int rzlen = strlen(VDEV_TYPE_RAIDZ); 2677 assert(rzlen == strlen(VDEV_TYPE_DRAID)); 2678 int typlen = strlen(type); 2679 if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 || 2680 strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) && 2681 typlen != rzlen) { 2682 uint64_t vdev_parity; 2683 int parity = *(type + rzlen) - '0'; 2684 2685 if (parity <= 0 || parity > 3 || 2686 (typlen - rzlen) != 1) { 2687 /* 2688 * Nonsense parity specified, can 2689 * never match 2690 */ 2691 free(type); 2692 return (NULL); 2693 } 2694 verify(nvlist_lookup_uint64(nv, 2695 ZPOOL_CONFIG_NPARITY, &vdev_parity) == 0); 2696 if ((int)vdev_parity != parity) { 2697 free(type); 2698 break; 2699 } 2700 } 2701 2702 free(type); 2703 if (errno != 0) 2704 return (NULL); 2705 2706 /* 2707 * Now verify that we have the correct vdev id. 2708 */ 2709 if (vdev_id == id) 2710 return (nv); 2711 } 2712 2713 /* 2714 * Common case 2715 */ 2716 if (strcmp(srchval, val) == 0) 2717 return (nv); 2718 break; 2719 } 2720 2721 default: 2722 break; 2723 } 2724 2725 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2726 &child, &children) != 0) 2727 return (NULL); 2728 2729 for (c = 0; c < children; c++) { 2730 if ((ret = vdev_to_nvlist_iter(child[c], search, 2731 avail_spare, l2cache, NULL)) != NULL) { 2732 /* 2733 * The 'is_log' value is only set for the toplevel 2734 * vdev, not the leaf vdevs. So we always lookup the 2735 * log device from the root of the vdev tree (where 2736 * 'log' is non-NULL). 
2737 */ 2738 if (log != NULL && 2739 nvlist_lookup_uint64(child[c], 2740 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 2741 is_log) { 2742 *log = B_TRUE; 2743 } 2744 return (ret); 2745 } 2746 } 2747 2748 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2749 &child, &children) == 0) { 2750 for (c = 0; c < children; c++) { 2751 if ((ret = vdev_to_nvlist_iter(child[c], search, 2752 avail_spare, l2cache, NULL)) != NULL) { 2753 *avail_spare = B_TRUE; 2754 return (ret); 2755 } 2756 } 2757 } 2758 2759 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2760 &child, &children) == 0) { 2761 for (c = 0; c < children; c++) { 2762 if ((ret = vdev_to_nvlist_iter(child[c], search, 2763 avail_spare, l2cache, NULL)) != NULL) { 2764 *l2cache = B_TRUE; 2765 return (ret); 2766 } 2767 } 2768 } 2769 2770 return (NULL); 2771 } 2772 2773 /* 2774 * Given a physical path or guid, find the associated vdev. 2775 */ 2776 nvlist_t * 2777 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 2778 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 2779 { 2780 nvlist_t *search, *nvroot, *ret; 2781 uint64_t guid; 2782 char *end; 2783 2784 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2785 2786 guid = strtoull(ppath, &end, 0); 2787 if (guid != 0 && *end == '\0') { 2788 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2789 } else { 2790 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, 2791 ppath) == 0); 2792 } 2793 2794 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2795 &nvroot) == 0); 2796 2797 *avail_spare = B_FALSE; 2798 *l2cache = B_FALSE; 2799 if (log != NULL) 2800 *log = B_FALSE; 2801 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2802 nvlist_free(search); 2803 2804 return (ret); 2805 } 2806 2807 /* 2808 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 
2809 */ 2810 static boolean_t 2811 zpool_vdev_is_interior(const char *name) 2812 { 2813 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2814 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 || 2815 strncmp(name, 2816 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 || 2817 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2818 return (B_TRUE); 2819 2820 if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 && 2821 !zpool_is_draid_spare(name)) 2822 return (B_TRUE); 2823 2824 return (B_FALSE); 2825 } 2826 2827 nvlist_t * 2828 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2829 boolean_t *l2cache, boolean_t *log) 2830 { 2831 char *end; 2832 nvlist_t *nvroot, *search, *ret; 2833 uint64_t guid; 2834 2835 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2836 2837 guid = strtoull(path, &end, 0); 2838 if (guid != 0 && *end == '\0') { 2839 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2840 } else if (zpool_vdev_is_interior(path)) { 2841 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2842 } else { 2843 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2844 } 2845 2846 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2847 &nvroot) == 0); 2848 2849 *avail_spare = B_FALSE; 2850 *l2cache = B_FALSE; 2851 if (log != NULL) 2852 *log = B_FALSE; 2853 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2854 nvlist_free(search); 2855 2856 return (ret); 2857 } 2858 2859 static int 2860 vdev_is_online(nvlist_t *nv) 2861 { 2862 uint64_t ival; 2863 2864 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2865 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2866 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2867 return (0); 2868 2869 return (1); 2870 } 2871 2872 /* 2873 * Helper function for zpool_get_physpaths(). 
2874 */ 2875 static int 2876 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2877 size_t *bytes_written) 2878 { 2879 size_t bytes_left, pos, rsz; 2880 char *tmppath; 2881 const char *format; 2882 2883 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2884 &tmppath) != 0) 2885 return (EZFS_NODEVICE); 2886 2887 pos = *bytes_written; 2888 bytes_left = physpath_size - pos; 2889 format = (pos == 0) ? "%s" : " %s"; 2890 2891 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2892 *bytes_written += rsz; 2893 2894 if (rsz >= bytes_left) { 2895 /* if physpath was not copied properly, clear it */ 2896 if (bytes_left != 0) { 2897 physpath[pos] = 0; 2898 } 2899 return (EZFS_NOSPC); 2900 } 2901 return (0); 2902 } 2903 2904 static int 2905 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2906 size_t *rsz, boolean_t is_spare) 2907 { 2908 char *type; 2909 int ret; 2910 2911 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2912 return (EZFS_INVALCONFIG); 2913 2914 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2915 /* 2916 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2917 * For a spare vdev, we only want to boot from the active 2918 * spare device. 
2919 */ 2920 if (is_spare) { 2921 uint64_t spare = 0; 2922 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2923 &spare); 2924 if (!spare) 2925 return (EZFS_INVALCONFIG); 2926 } 2927 2928 if (vdev_is_online(nv)) { 2929 if ((ret = vdev_get_one_physpath(nv, physpath, 2930 phypath_size, rsz)) != 0) 2931 return (ret); 2932 } 2933 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2934 strcmp(type, VDEV_TYPE_RAIDZ) == 0 || 2935 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2936 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2937 nvlist_t **child; 2938 uint_t count; 2939 int i, ret; 2940 2941 if (nvlist_lookup_nvlist_array(nv, 2942 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2943 return (EZFS_INVALCONFIG); 2944 2945 for (i = 0; i < count; i++) { 2946 ret = vdev_get_physpaths(child[i], physpath, 2947 phypath_size, rsz, is_spare); 2948 if (ret == EZFS_NOSPC) 2949 return (ret); 2950 } 2951 } 2952 2953 return (EZFS_POOL_INVALARG); 2954 } 2955 2956 /* 2957 * Get phys_path for a root pool config. 2958 * Return 0 on success; non-zero on failure. 2959 */ 2960 static int 2961 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2962 { 2963 size_t rsz; 2964 nvlist_t *vdev_root; 2965 nvlist_t **child; 2966 uint_t count; 2967 char *type; 2968 2969 rsz = 0; 2970 2971 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2972 &vdev_root) != 0) 2973 return (EZFS_INVALCONFIG); 2974 2975 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2976 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2977 &child, &count) != 0) 2978 return (EZFS_INVALCONFIG); 2979 2980 /* 2981 * root pool can only have a single top-level vdev. 
2982 */ 2983 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1) 2984 return (EZFS_POOL_INVALARG); 2985 2986 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2987 B_FALSE); 2988 2989 /* No online devices */ 2990 if (rsz == 0) 2991 return (EZFS_NODEVICE); 2992 2993 return (0); 2994 } 2995 2996 /* 2997 * Get phys_path for a root pool 2998 * Return 0 on success; non-zero on failure. 2999 */ 3000 int 3001 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 3002 { 3003 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 3004 phypath_size)); 3005 } 3006 3007 /* 3008 * Convert a vdev path to a GUID. Returns GUID or 0 on error. 3009 * 3010 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it 3011 * if the VDEV is a spare, l2cache, or log device. If they're NULL then 3012 * ignore them. 3013 */ 3014 static uint64_t 3015 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path, 3016 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log) 3017 { 3018 uint64_t guid; 3019 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE; 3020 nvlist_t *tgt; 3021 3022 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache, 3023 &log)) == NULL) 3024 return (0); 3025 3026 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0); 3027 if (is_spare != NULL) 3028 *is_spare = spare; 3029 if (is_l2cache != NULL) 3030 *is_l2cache = l2cache; 3031 if (is_log != NULL) 3032 *is_log = log; 3033 3034 return (guid); 3035 } 3036 3037 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. */ 3038 uint64_t 3039 zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path) 3040 { 3041 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL)); 3042 } 3043 3044 /* 3045 * Bring the specified vdev online. The 'flags' parameter is a set of the 3046 * ZFS_ONLINE_* flags. 
3047 */ 3048 int 3049 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 3050 vdev_state_t *newstate) 3051 { 3052 zfs_cmd_t zc = {"\0"}; 3053 char msg[1024]; 3054 char *pathname; 3055 nvlist_t *tgt; 3056 boolean_t avail_spare, l2cache, islog; 3057 libzfs_handle_t *hdl = zhp->zpool_hdl; 3058 int error; 3059 3060 if (flags & ZFS_ONLINE_EXPAND) { 3061 (void) snprintf(msg, sizeof (msg), 3062 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 3063 } else { 3064 (void) snprintf(msg, sizeof (msg), 3065 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 3066 } 3067 3068 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3069 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3070 &islog)) == NULL) 3071 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3072 3073 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3074 3075 if (avail_spare) 3076 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3077 3078 if ((flags & ZFS_ONLINE_EXPAND || 3079 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) && 3080 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) { 3081 uint64_t wholedisk = 0; 3082 3083 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 3084 &wholedisk); 3085 3086 /* 3087 * XXX - L2ARC 1.0 devices can't support expansion. 
3088 */ 3089 if (l2cache) { 3090 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3091 "cannot expand cache devices")); 3092 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 3093 } 3094 3095 if (wholedisk) { 3096 const char *fullpath = path; 3097 char buf[MAXPATHLEN]; 3098 3099 if (path[0] != '/') { 3100 error = zfs_resolve_shortname(path, buf, 3101 sizeof (buf)); 3102 if (error != 0) 3103 return (zfs_error(hdl, EZFS_NODEVICE, 3104 msg)); 3105 3106 fullpath = buf; 3107 } 3108 3109 error = zpool_relabel_disk(hdl, fullpath, msg); 3110 if (error != 0) 3111 return (error); 3112 } 3113 } 3114 3115 zc.zc_cookie = VDEV_STATE_ONLINE; 3116 zc.zc_obj = flags; 3117 3118 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 3119 if (errno == EINVAL) { 3120 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 3121 "from this pool into a new one. Use '%s' " 3122 "instead"), "zpool detach"); 3123 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 3124 } 3125 return (zpool_standard_error(hdl, errno, msg)); 3126 } 3127 3128 *newstate = zc.zc_cookie; 3129 return (0); 3130 } 3131 3132 /* 3133 * Take the specified vdev offline 3134 */ 3135 int 3136 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 3137 { 3138 zfs_cmd_t zc = {"\0"}; 3139 char msg[1024]; 3140 nvlist_t *tgt; 3141 boolean_t avail_spare, l2cache; 3142 libzfs_handle_t *hdl = zhp->zpool_hdl; 3143 3144 (void) snprintf(msg, sizeof (msg), 3145 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 3146 3147 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3148 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3149 NULL)) == NULL) 3150 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3151 3152 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3153 3154 if (avail_spare) 3155 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3156 3157 zc.zc_cookie = VDEV_STATE_OFFLINE; 3158 zc.zc_obj = istmp ? 
ZFS_OFFLINE_TEMPORARY : 0; 3159 3160 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 3161 return (0); 3162 3163 switch (errno) { 3164 case EBUSY: 3165 3166 /* 3167 * There are no other replicas of this device. 3168 */ 3169 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 3170 3171 case EEXIST: 3172 /* 3173 * The log device has unplayed logs 3174 */ 3175 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 3176 3177 default: 3178 return (zpool_standard_error(hdl, errno, msg)); 3179 } 3180 } 3181 3182 /* 3183 * Mark the given vdev faulted. 3184 */ 3185 int 3186 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 3187 { 3188 zfs_cmd_t zc = {"\0"}; 3189 char msg[1024]; 3190 libzfs_handle_t *hdl = zhp->zpool_hdl; 3191 3192 (void) snprintf(msg, sizeof (msg), 3193 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid); 3194 3195 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3196 zc.zc_guid = guid; 3197 zc.zc_cookie = VDEV_STATE_FAULTED; 3198 zc.zc_obj = aux; 3199 3200 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 3201 return (0); 3202 3203 switch (errno) { 3204 case EBUSY: 3205 3206 /* 3207 * There are no other replicas of this device. 3208 */ 3209 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 3210 3211 default: 3212 return (zpool_standard_error(hdl, errno, msg)); 3213 } 3214 3215 } 3216 3217 /* 3218 * Mark the given vdev degraded. 
3219 */ 3220 int 3221 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 3222 { 3223 zfs_cmd_t zc = {"\0"}; 3224 char msg[1024]; 3225 libzfs_handle_t *hdl = zhp->zpool_hdl; 3226 3227 (void) snprintf(msg, sizeof (msg), 3228 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid); 3229 3230 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3231 zc.zc_guid = guid; 3232 zc.zc_cookie = VDEV_STATE_DEGRADED; 3233 zc.zc_obj = aux; 3234 3235 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 3236 return (0); 3237 3238 return (zpool_standard_error(hdl, errno, msg)); 3239 } 3240 3241 /* 3242 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 3243 * a hot spare. 3244 */ 3245 static boolean_t 3246 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 3247 { 3248 nvlist_t **child; 3249 uint_t c, children; 3250 char *type; 3251 3252 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 3253 &children) == 0) { 3254 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 3255 &type) == 0); 3256 3257 if ((strcmp(type, VDEV_TYPE_SPARE) == 0 || 3258 strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) && 3259 children == 2 && child[which] == tgt) 3260 return (B_TRUE); 3261 3262 for (c = 0; c < children; c++) 3263 if (is_replacing_spare(child[c], tgt, which)) 3264 return (B_TRUE); 3265 } 3266 3267 return (B_FALSE); 3268 } 3269 3270 /* 3271 * Attach new_disk (fully described by nvroot) to old_disk. 3272 * If 'replacing' is specified, the new disk will replace the old one. 
3273 */ 3274 int 3275 zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk, 3276 const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild) 3277 { 3278 zfs_cmd_t zc = {"\0"}; 3279 char msg[1024]; 3280 int ret; 3281 nvlist_t *tgt; 3282 boolean_t avail_spare, l2cache, islog; 3283 uint64_t val; 3284 char *newname; 3285 nvlist_t **child; 3286 uint_t children; 3287 nvlist_t *config_root; 3288 libzfs_handle_t *hdl = zhp->zpool_hdl; 3289 3290 if (replacing) 3291 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 3292 "cannot replace %s with %s"), old_disk, new_disk); 3293 else 3294 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 3295 "cannot attach %s to %s"), new_disk, old_disk); 3296 3297 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3298 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 3299 &islog)) == NULL) 3300 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3301 3302 if (avail_spare) 3303 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3304 3305 if (l2cache) 3306 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 3307 3308 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3309 zc.zc_cookie = replacing; 3310 zc.zc_simple = rebuild; 3311 3312 if (rebuild && 3313 zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) { 3314 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3315 "the loaded zfs module doesn't support device rebuilds")); 3316 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 3317 } 3318 3319 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3320 &child, &children) != 0 || children != 1) { 3321 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3322 "new device must be a single disk")); 3323 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 3324 } 3325 3326 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 3327 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 3328 3329 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL) 3330 return (-1); 3331 3332 /* 3333 * 
If the target is a hot spare that has been swapped in, we can only 3334 * replace it with another hot spare. 3335 */ 3336 if (replacing && 3337 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 3338 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 3339 NULL) == NULL || !avail_spare) && 3340 is_replacing_spare(config_root, tgt, 1)) { 3341 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3342 "can only be replaced by another hot spare")); 3343 free(newname); 3344 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 3345 } 3346 3347 free(newname); 3348 3349 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 3350 return (-1); 3351 3352 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 3353 3354 zcmd_free_nvlists(&zc); 3355 3356 if (ret == 0) 3357 return (0); 3358 3359 switch (errno) { 3360 case ENOTSUP: 3361 /* 3362 * Can't attach to or replace this type of vdev. 3363 */ 3364 if (replacing) { 3365 uint64_t version = zpool_get_prop_int(zhp, 3366 ZPOOL_PROP_VERSION, NULL); 3367 3368 if (islog) { 3369 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3370 "cannot replace a log with a spare")); 3371 } else if (rebuild) { 3372 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3373 "only mirror and dRAID vdevs support " 3374 "sequential reconstruction")); 3375 } else if (zpool_is_draid_spare(new_disk)) { 3376 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3377 "dRAID spares can only replace child " 3378 "devices in their parent's dRAID vdev")); 3379 } else if (version >= SPA_VERSION_MULTI_REPLACE) { 3380 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3381 "already in replacing/spare config; wait " 3382 "for completion or use 'zpool detach'")); 3383 } else { 3384 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3385 "cannot replace a replacing device")); 3386 } 3387 } else { 3388 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3389 "can only attach to mirrors and top-level " 3390 "disks")); 3391 } 3392 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 3393 break; 3394 3395 case EINVAL: 3396 /* 3397 * The new device must be 
a single disk. 3398 */ 3399 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3400 "new device must be a single disk")); 3401 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 3402 break; 3403 3404 case EBUSY: 3405 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, " 3406 "or device removal is in progress"), 3407 new_disk); 3408 (void) zfs_error(hdl, EZFS_BADDEV, msg); 3409 break; 3410 3411 case EOVERFLOW: 3412 /* 3413 * The new device is too small. 3414 */ 3415 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3416 "device is too small")); 3417 (void) zfs_error(hdl, EZFS_BADDEV, msg); 3418 break; 3419 3420 case EDOM: 3421 /* 3422 * The new device has a different optimal sector size. 3423 */ 3424 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3425 "new device has a different optimal sector size; use the " 3426 "option '-o ashift=N' to override the optimal size")); 3427 (void) zfs_error(hdl, EZFS_BADDEV, msg); 3428 break; 3429 3430 case ENAMETOOLONG: 3431 /* 3432 * The resulting top-level vdev spec won't fit in the label. 3433 */ 3434 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 3435 break; 3436 3437 default: 3438 (void) zpool_standard_error(hdl, errno, msg); 3439 } 3440 3441 return (-1); 3442 } 3443 3444 /* 3445 * Detach the specified device. 
3446 */ 3447 int 3448 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 3449 { 3450 zfs_cmd_t zc = {"\0"}; 3451 char msg[1024]; 3452 nvlist_t *tgt; 3453 boolean_t avail_spare, l2cache; 3454 libzfs_handle_t *hdl = zhp->zpool_hdl; 3455 3456 (void) snprintf(msg, sizeof (msg), 3457 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 3458 3459 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3460 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3461 NULL)) == NULL) 3462 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3463 3464 if (avail_spare) 3465 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3466 3467 if (l2cache) 3468 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 3469 3470 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3471 3472 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 3473 return (0); 3474 3475 switch (errno) { 3476 3477 case ENOTSUP: 3478 /* 3479 * Can't detach from this type of vdev. 3480 */ 3481 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 3482 "applicable to mirror and replacing vdevs")); 3483 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 3484 break; 3485 3486 case EBUSY: 3487 /* 3488 * There are no other replicas of this device. 3489 */ 3490 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 3491 break; 3492 3493 default: 3494 (void) zpool_standard_error(hdl, errno, msg); 3495 } 3496 3497 return (-1); 3498 } 3499 3500 /* 3501 * Find a mirror vdev in the source nvlist. 3502 * 3503 * The mchild array contains a list of disks in one of the top-level mirrors 3504 * of the source pool. The schild array contains a list of disks that the 3505 * user specified on the command line. We loop over the mchild array to 3506 * see if any entry in the schild array matches. 3507 * 3508 * If a disk in the mchild array is found in the schild array, we return 3509 * the index of that entry. Otherwise we return -1. 
 */
/*
 * Return the index (within 'mchild') of the first mirror child whose
 * display name matches any of the 'schild' vdevs, or -1 if none match.
 * Comparison is by zpool_vdev_name() rendering, so either side may name
 * the device by path or by type-id.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], 0);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], 0);
			boolean_t result = (strcmp(mpath, spath) == 0);

			/* free before possibly returning: no leak on match */
			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}

/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024], *bias;
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers, readonly = B_FALSE;
	/*
	 * memory_err starts true; it is cleared only on the paths where all
	 * allocations succeeded, so a bare "goto out" reports no_memory().
	 */
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		/* shadows the 'flags' parameter intentionally, local use only */
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
		(void) nvlist_lookup_uint64(zc_props,
		    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
		if (readonly) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property %s can only be set at import time"),
			    zpool_prop_to_name(ZPOOL_PROP_READONLY));
			return (-1);
		}
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	/* worst case: one split-off child per top-level vdev */
	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		boolean_t is_special = B_FALSE, is_dedup = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			/* remember where a trailing run of holes begins */
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);

		if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
			vdev = child[c];
			if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
				goto out;
			continue;
		} else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		if (nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {
			if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
				is_special = B_TRUE;
			else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
				is_dedup = B_TRUE;
		}
		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;

		/*
		 * For a dry run, tag the duplicated leaf with the original
		 * allocation bias so the preview output shows it correctly.
		 */
		if (flags.dryrun != 0) {
			if (is_dedup == B_TRUE) {
				if (nvlist_add_string(varray[vcount - 1],
				    ZPOOL_CONFIG_ALLOCATION_BIAS,
				    VDEV_ALLOC_BIAS_DEDUP) != 0)
					goto out;
			} else if (is_special == B_TRUE) {
				if (nvlist_add_string(varray[vcount - 1],
				    ZPOOL_CONFIG_ALLOCATION_BIAS,
				    VDEV_ALLOC_BIAS_SPECIAL) != 0)
					goto out;
			}
		}
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found (trailing holes are trimmed) */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	/* only free *newroot if we allocated it here */
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}

/*
 * Remove the given device.
3787 */ 3788 int 3789 zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3790 { 3791 zfs_cmd_t zc = {"\0"}; 3792 char msg[1024]; 3793 nvlist_t *tgt; 3794 boolean_t avail_spare, l2cache, islog; 3795 libzfs_handle_t *hdl = zhp->zpool_hdl; 3796 uint64_t version; 3797 3798 (void) snprintf(msg, sizeof (msg), 3799 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3800 3801 if (zpool_is_draid_spare(path)) { 3802 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3803 "dRAID spares cannot be removed")); 3804 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3805 } 3806 3807 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3808 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3809 &islog)) == NULL) 3810 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3811 3812 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3813 if (islog && version < SPA_VERSION_HOLES) { 3814 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3815 "pool must be upgraded to support log removal")); 3816 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3817 } 3818 3819 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 3820 3821 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3822 return (0); 3823 3824 switch (errno) { 3825 3826 case EINVAL: 3827 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3828 "invalid config; all top-level vdevs must " 3829 "have the same sector size and not be raidz.")); 3830 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 3831 break; 3832 3833 case EBUSY: 3834 if (islog) { 3835 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3836 "Mount encrypted datasets to replay logs.")); 3837 } else { 3838 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3839 "Pool busy; removal may already be in progress")); 3840 } 3841 (void) zfs_error(hdl, EZFS_BUSY, msg); 3842 break; 3843 3844 case EACCES: 3845 if (islog) { 3846 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3847 "Mount encrypted datasets to replay logs.")); 3848 (void) zfs_error(hdl, EZFS_BUSY, msg); 3849 } else { 3850 (void) 
zpool_standard_error(hdl, errno, msg); 3851 } 3852 break; 3853 3854 default: 3855 (void) zpool_standard_error(hdl, errno, msg); 3856 } 3857 return (-1); 3858 } 3859 3860 int 3861 zpool_vdev_remove_cancel(zpool_handle_t *zhp) 3862 { 3863 zfs_cmd_t zc; 3864 char msg[1024]; 3865 libzfs_handle_t *hdl = zhp->zpool_hdl; 3866 3867 (void) snprintf(msg, sizeof (msg), 3868 dgettext(TEXT_DOMAIN, "cannot cancel removal")); 3869 3870 bzero(&zc, sizeof (zc)); 3871 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3872 zc.zc_cookie = 1; 3873 3874 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3875 return (0); 3876 3877 return (zpool_standard_error(hdl, errno, msg)); 3878 } 3879 3880 int 3881 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path, 3882 uint64_t *sizep) 3883 { 3884 char msg[1024]; 3885 nvlist_t *tgt; 3886 boolean_t avail_spare, l2cache, islog; 3887 libzfs_handle_t *hdl = zhp->zpool_hdl; 3888 3889 (void) snprintf(msg, sizeof (msg), 3890 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"), 3891 path); 3892 3893 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3894 &islog)) == NULL) 3895 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3896 3897 if (avail_spare || l2cache || islog) { 3898 *sizep = 0; 3899 return (0); 3900 } 3901 3902 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) { 3903 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3904 "indirect size not available")); 3905 return (zfs_error(hdl, EINVAL, msg)); 3906 } 3907 return (0); 3908 } 3909 3910 /* 3911 * Clear the errors for the pool, or the particular device if specified. 
3912 */ 3913 int 3914 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3915 { 3916 zfs_cmd_t zc = {"\0"}; 3917 char msg[1024]; 3918 nvlist_t *tgt; 3919 zpool_load_policy_t policy; 3920 boolean_t avail_spare, l2cache; 3921 libzfs_handle_t *hdl = zhp->zpool_hdl; 3922 nvlist_t *nvi = NULL; 3923 int error; 3924 3925 if (path) 3926 (void) snprintf(msg, sizeof (msg), 3927 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3928 path); 3929 else 3930 (void) snprintf(msg, sizeof (msg), 3931 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3932 zhp->zpool_name); 3933 3934 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3935 if (path) { 3936 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3937 &l2cache, NULL)) == NULL) 3938 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3939 3940 /* 3941 * Don't allow error clearing for hot spares. Do allow 3942 * error clearing for l2cache devices. 3943 */ 3944 if (avail_spare) 3945 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3946 3947 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3948 &zc.zc_guid) == 0); 3949 } 3950 3951 zpool_get_load_policy(rewindnvl, &policy); 3952 zc.zc_cookie = policy.zlp_rewind; 3953 3954 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3955 return (-1); 3956 3957 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 3958 return (-1); 3959 3960 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3961 errno == ENOMEM) { 3962 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3963 zcmd_free_nvlists(&zc); 3964 return (-1); 3965 } 3966 } 3967 3968 if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) && 3969 errno != EPERM && errno != EACCES)) { 3970 if (policy.zlp_rewind & 3971 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3972 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3973 zpool_rewind_exclaim(hdl, zc.zc_name, 3974 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), 3975 nvi); 3976 nvlist_free(nvi); 3977 } 3978 zcmd_free_nvlists(&zc); 3979 return (0); 
3980 } 3981 3982 zcmd_free_nvlists(&zc); 3983 return (zpool_standard_error(hdl, errno, msg)); 3984 } 3985 3986 /* 3987 * Similar to zpool_clear(), but takes a GUID (used by fmd). 3988 */ 3989 int 3990 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3991 { 3992 zfs_cmd_t zc = {"\0"}; 3993 char msg[1024]; 3994 libzfs_handle_t *hdl = zhp->zpool_hdl; 3995 3996 (void) snprintf(msg, sizeof (msg), 3997 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3998 (u_longlong_t)guid); 3999 4000 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4001 zc.zc_guid = guid; 4002 zc.zc_cookie = ZPOOL_NO_REWIND; 4003 4004 if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0) 4005 return (0); 4006 4007 return (zpool_standard_error(hdl, errno, msg)); 4008 } 4009 4010 /* 4011 * Change the GUID for a pool. 4012 */ 4013 int 4014 zpool_reguid(zpool_handle_t *zhp) 4015 { 4016 char msg[1024]; 4017 libzfs_handle_t *hdl = zhp->zpool_hdl; 4018 zfs_cmd_t zc = {"\0"}; 4019 4020 (void) snprintf(msg, sizeof (msg), 4021 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 4022 4023 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4024 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 4025 return (0); 4026 4027 return (zpool_standard_error(hdl, errno, msg)); 4028 } 4029 4030 /* 4031 * Reopen the pool. 
4032 */ 4033 int 4034 zpool_reopen_one(zpool_handle_t *zhp, void *data) 4035 { 4036 libzfs_handle_t *hdl = zpool_get_handle(zhp); 4037 const char *pool_name = zpool_get_name(zhp); 4038 boolean_t *scrub_restart = data; 4039 int error; 4040 4041 error = lzc_reopen(pool_name, *scrub_restart); 4042 if (error) { 4043 return (zpool_standard_error_fmt(hdl, error, 4044 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name)); 4045 } 4046 4047 return (0); 4048 } 4049 4050 /* call into libzfs_core to execute the sync IOCTL per pool */ 4051 int 4052 zpool_sync_one(zpool_handle_t *zhp, void *data) 4053 { 4054 int ret; 4055 libzfs_handle_t *hdl = zpool_get_handle(zhp); 4056 const char *pool_name = zpool_get_name(zhp); 4057 boolean_t *force = data; 4058 nvlist_t *innvl = fnvlist_alloc(); 4059 4060 fnvlist_add_boolean_value(innvl, "force", *force); 4061 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) { 4062 nvlist_free(innvl); 4063 return (zpool_standard_error_fmt(hdl, ret, 4064 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name)); 4065 } 4066 nvlist_free(innvl); 4067 4068 return (0); 4069 } 4070 4071 #define PATH_BUF_LEN 64 4072 4073 /* 4074 * Given a vdev, return the name to display in iostat. If the vdev has a path, 4075 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 4076 * We also check if this is a whole disk, in which case we strip off the 4077 * trailing 's0' slice name. 4078 * 4079 * This routine is also responsible for identifying when disks have been 4080 * reconfigured in a new location. The kernel will have opened the device by 4081 * devid, but the path will still refer to the old location. To catch this, we 4082 * first do a path -> devid translation (which is fast for the common case). If 4083 * the devid matches, we're done. If not, we do a reverse devid -> path 4084 * translation and issue the appropriate ioctl() to update the path of the vdev. 
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    int name_flags)
{
	char *path, *type, *env;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN];

	/*
	 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
	 * zpool name that will be displayed to the user.
	 */
	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (zhp != NULL && strcmp(type, "root") == 0)
		return (zfs_strdup(hdl, zpool_get_name(zhp)));

	/*
	 * Environment overrides: each ZPOOL_VDEV_NAME_* variable is treated
	 * as enabled if it parses as a nonzero number or starts with
	 * "YES"/"ON" (case-insensitive).
	 */
	env = getenv("ZPOOL_VDEV_NAME_PATH");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_PATH;

	env = getenv("ZPOOL_VDEV_NAME_GUID");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_GUID;

	env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_FOLLOW_LINKS;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    name_flags & VDEV_NAME_GUID) {
		/* missing device, or GUID naming requested: show the GUID */
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
		(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
			char *rp = realpath(path, NULL);
			if (rp) {
				/*
				 * NOTE(review): paths longer than
				 * PATH_BUF_LEN (64) are silently truncated
				 * here by strlcpy — confirm acceptable.
				 */
				strlcpy(buf, rp, sizeof (buf));
				path = buf;
				free(rp);
			}
		}

		/*
		 * For a block device only use the name.
		 */
		if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
		    !(name_flags & VDEV_NAME_PATH)) {
			path = zfs_strip_path(path);
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&
		    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
		    == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
			return (zfs_strip_partition(path));
		}
	} else {
		/* no path in the config: name the vdev by its type */
		path = type;

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * If it's a dRAID device, we add parity, groups, and spares.
		 */
		if (strcmp(path, VDEV_TYPE_DRAID) == 0) {
			uint64_t ndata, nparity, nspares;
			nvlist_t **child;
			uint_t children;

			verify(nvlist_lookup_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
			verify(nvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_NPARITY, &nparity) == 0);
			verify(nvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_DRAID_NDATA, &ndata) == 0);
			verify(nvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_DRAID_NSPARES, &nspares) == 0);

			path = zpool_draid_name(buf, sizeof (buf), ndata,
			    nparity, nspares, children);
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (name_flags & VDEV_NAME_TYPE_ID) {
			uint64_t id;
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	/* caller owns the returned string and must free() it */
	return (zfs_strdup(hdl, path));
}

/* total-order comparator over raw zbookmark_phys_t bytes, for qsort() */
static int
zbookmark_mem_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t count;
	zbookmark_phys_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_phys_t));
	zc.zc_nvlist_dst_size = count;
	/*
	 * NOTE(review): unbounded strcpy; the rest of this file uses
	 * strlcpy with sizeof (zc.zc_name) — consider making consistent.
	 */
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			/* buffer too small (or hard error): free and retry */
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				void *dst;

				count = zc.zc_nvlist_dst_size;
				dst = zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_phys_t));
				zc.zc_nvlist_dst = (uintptr_t)dst;
			} else {
				return (zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "errors: List of "
				    "errors unavailable")));
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriate and decrement the total number of elements.
	 */
	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}

/*
 * Upgrade a ZFS pool to the latest on-disk version.
4311 */ 4312 int 4313 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 4314 { 4315 zfs_cmd_t zc = {"\0"}; 4316 libzfs_handle_t *hdl = zhp->zpool_hdl; 4317 4318 (void) strcpy(zc.zc_name, zhp->zpool_name); 4319 zc.zc_cookie = new_version; 4320 4321 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 4322 return (zpool_standard_error_fmt(hdl, errno, 4323 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 4324 zhp->zpool_name)); 4325 return (0); 4326 } 4327 4328 void 4329 zfs_save_arguments(int argc, char **argv, char *string, int len) 4330 { 4331 int i; 4332 4333 (void) strlcpy(string, basename(argv[0]), len); 4334 for (i = 1; i < argc; i++) { 4335 (void) strlcat(string, " ", len); 4336 (void) strlcat(string, argv[i], len); 4337 } 4338 } 4339 4340 int 4341 zpool_log_history(libzfs_handle_t *hdl, const char *message) 4342 { 4343 zfs_cmd_t zc = {"\0"}; 4344 nvlist_t *args; 4345 int err; 4346 4347 args = fnvlist_alloc(); 4348 fnvlist_add_string(args, "message", message); 4349 err = zcmd_write_src_nvlist(hdl, &zc, args); 4350 if (err == 0) 4351 err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc); 4352 nvlist_free(args); 4353 zcmd_free_nvlists(&zc); 4354 return (err); 4355 } 4356 4357 /* 4358 * Perform ioctl to get some command history of a pool. 4359 * 4360 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 4361 * logical offset of the history buffer to start reading from. 4362 * 4363 * Upon return, 'off' is the next logical offset to read from and 4364 * 'len' is the actual amount of bytes read into 'buf'. 
4365 */ 4366 static int 4367 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 4368 { 4369 zfs_cmd_t zc = {"\0"}; 4370 libzfs_handle_t *hdl = zhp->zpool_hdl; 4371 4372 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4373 4374 zc.zc_history = (uint64_t)(uintptr_t)buf; 4375 zc.zc_history_len = *len; 4376 zc.zc_history_offset = *off; 4377 4378 if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 4379 switch (errno) { 4380 case EPERM: 4381 return (zfs_error_fmt(hdl, EZFS_PERM, 4382 dgettext(TEXT_DOMAIN, 4383 "cannot show history for pool '%s'"), 4384 zhp->zpool_name)); 4385 case ENOENT: 4386 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 4387 dgettext(TEXT_DOMAIN, "cannot get history for pool " 4388 "'%s'"), zhp->zpool_name)); 4389 case ENOTSUP: 4390 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 4391 dgettext(TEXT_DOMAIN, "cannot get history for pool " 4392 "'%s', pool must be upgraded"), zhp->zpool_name)); 4393 default: 4394 return (zpool_standard_error_fmt(hdl, errno, 4395 dgettext(TEXT_DOMAIN, 4396 "cannot get history for '%s'"), zhp->zpool_name)); 4397 } 4398 } 4399 4400 *len = zc.zc_history_len; 4401 *off = zc.zc_history_offset; 4402 4403 return (0); 4404 } 4405 4406 /* 4407 * Retrieve the command history of a pool. 
4408 */ 4409 int 4410 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off, 4411 boolean_t *eof) 4412 { 4413 char *buf; 4414 int buflen = 128 * 1024; 4415 nvlist_t **records = NULL; 4416 uint_t numrecords = 0; 4417 int err, i; 4418 uint64_t start = *off; 4419 4420 buf = malloc(buflen); 4421 if (buf == NULL) 4422 return (ENOMEM); 4423 /* process about 1MB a time */ 4424 while (*off - start < 1024 * 1024) { 4425 uint64_t bytes_read = buflen; 4426 uint64_t leftover; 4427 4428 if ((err = get_history(zhp, buf, off, &bytes_read)) != 0) 4429 break; 4430 4431 /* if nothing else was read in, we're at EOF, just return */ 4432 if (!bytes_read) { 4433 *eof = B_TRUE; 4434 break; 4435 } 4436 4437 if ((err = zpool_history_unpack(buf, bytes_read, 4438 &leftover, &records, &numrecords)) != 0) 4439 break; 4440 *off -= leftover; 4441 if (leftover == bytes_read) { 4442 /* 4443 * no progress made, because buffer is not big enough 4444 * to hold this record; resize and retry. 4445 */ 4446 buflen *= 2; 4447 free(buf); 4448 buf = malloc(buflen); 4449 if (buf == NULL) 4450 return (ENOMEM); 4451 } 4452 } 4453 4454 free(buf); 4455 4456 if (!err) { 4457 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 4458 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 4459 records, numrecords) == 0); 4460 } 4461 for (i = 0; i < numrecords; i++) 4462 nvlist_free(records[i]); 4463 free(records); 4464 4465 return (err); 4466 } 4467 4468 /* 4469 * Retrieve the next event given the passed 'zevent_fd' file descriptor. 4470 * If there is a new event available 'nvp' will contain a newly allocated 4471 * nvlist and 'dropped' will be set to the number of missed events since 4472 * the last call to this function. When 'nvp' is set to NULL it indicates 4473 * no new events are available. In either case the function returns 0 and 4474 * it is up to the caller to free 'nvp'. In the case of a fatal error the 4475 * function will return a non-zero value. 
When the function is called in 4476 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed), 4477 * it will not return until a new event is available. 4478 */ 4479 int 4480 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp, 4481 int *dropped, unsigned flags, int zevent_fd) 4482 { 4483 zfs_cmd_t zc = {"\0"}; 4484 int error = 0; 4485 4486 *nvp = NULL; 4487 *dropped = 0; 4488 zc.zc_cleanup_fd = zevent_fd; 4489 4490 if (flags & ZEVENT_NONBLOCK) 4491 zc.zc_guid = ZEVENT_NONBLOCK; 4492 4493 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0) 4494 return (-1); 4495 4496 retry: 4497 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) { 4498 switch (errno) { 4499 case ESHUTDOWN: 4500 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 4501 dgettext(TEXT_DOMAIN, "zfs shutdown")); 4502 goto out; 4503 case ENOENT: 4504 /* Blocking error case should not occur */ 4505 if (!(flags & ZEVENT_NONBLOCK)) 4506 error = zpool_standard_error_fmt(hdl, errno, 4507 dgettext(TEXT_DOMAIN, "cannot get event")); 4508 4509 goto out; 4510 case ENOMEM: 4511 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 4512 error = zfs_error_fmt(hdl, EZFS_NOMEM, 4513 dgettext(TEXT_DOMAIN, "cannot get event")); 4514 goto out; 4515 } else { 4516 goto retry; 4517 } 4518 default: 4519 error = zpool_standard_error_fmt(hdl, errno, 4520 dgettext(TEXT_DOMAIN, "cannot get event")); 4521 goto out; 4522 } 4523 } 4524 4525 error = zcmd_read_dst_nvlist(hdl, &zc, nvp); 4526 if (error != 0) 4527 goto out; 4528 4529 *dropped = (int)zc.zc_cookie; 4530 out: 4531 zcmd_free_nvlists(&zc); 4532 4533 return (error); 4534 } 4535 4536 /* 4537 * Clear all events. 
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot clear events"));

	/*
	 * NOTE(review): 'msg' is passed as the format argument; safe today
	 * because it contains no conversion specifiers, but
	 * zpool_standard_error() would be the safer call — confirm.
	 */
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno, msg));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}

/*
 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
 * the passed zevent_fd file handle.  On success zero is returned,
 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
 */
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	zc.zc_guid = eid;
	zc.zc_cleanup_fd = zevent_fd;

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
		switch (errno) {
		case ENOENT:
			error = zfs_error_fmt(hdl, EZFS_NOENT,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		case ENOMEM:
			error = zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;
		}
	}

	return (error);
}

/*
 * Render a human-readable path for object 'obj' in dataset 'dsobj' into
 * 'pathname' (at most 'len' bytes).  Falls back to "<dsname>:<0xobj>" or
 * raw "<0xdsobj>:<0xobj>" forms when name lookups fail.  When
 * 'always_unmounted' is set, the dataset mountpoint is never consulted.
 */
static void
zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len, boolean_t always_unmounted)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (zfs_ioctl(zhp->zpool_hdl,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
	    &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			/* absolute path: mountpoint + in-dataset path */
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	/* is_mounted() allocated mntpnt; free(NULL) is a no-op */
	free(mntpnt);
}

/* Public wrapper: may use the mountpoint to build an absolute path. */
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
}

/* Public wrapper: always renders the dataset-relative ("ds:path") form. */
void
zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
}
/*
 * Wait while the specified activity is in progress in the pool.
 */
int
zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
{
	boolean_t missing;

	int error = zpool_wait_status(zhp, activity, &missing, NULL);

	/* unlike zpool_wait_status(), a vanished pool is an error here */
	if (missing) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
		    dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
		    zhp->zpool_name);
		return (ENOENT);
	} else {
		return (error);
	}
}

/*
 * Wait for the given activity and return the status of the wait (whether or not
 * any waiting was done) in the 'waited' parameter.  Non-existent pools are
 * reported via the 'missing' parameter, rather than by printing an error
 * message.  This is convenient when this function is called in a loop over a
 * long period of time (as it is, for example, by zpool's wait cmd).  In that
 * scenario, a pool being exported or destroyed should be considered a normal
 * event, so we don't want to print an error when we find that the pool doesn't
 * exist.
 */
int
zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
    boolean_t *missing, boolean_t *waited)
{
	int error = lzc_wait(zhp->zpool_name, activity, waited);
	*missing = (error == ENOENT);
	if (*missing)
		return (0);

	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
		    zhp->zpool_name);
	}

	return (error);
}

/*
 * Store 'envmap' as the pool's bootenv via libzfs_core.  Returns the
 * lzc error (0 on success); an error is also reported on the handle.
 */
int
zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
{
	int error = lzc_set_bootenv(zhp->zpool_name, envmap);
	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN,
		    "error setting bootenv in pool '%s'"), zhp->zpool_name);
	}

	return (error);
}

/*
 * Fetch the pool's bootenv nvlist into *nvlp.  On success the caller
 * owns the returned nvlist; on failure *nvlp is left untouched.
 */
int
zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
{
	nvlist_t *nvl;
	int error;

	nvl = NULL;
	error = lzc_get_bootenv(zhp->zpool_name, &nvl);
	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN,
		    "error getting bootenv in pool '%s'"), zhp->zpool_name);
	} else {
		*nvlp = nvl;
	}

	return (error);
}

/*
 * Attempt to read and parse feature file(s) (from "compatibility" property).
 * Files contain zpool feature names, comma or whitespace-separated.
 * Comments (# character to next newline) are discarded.
 *
 * Arguments:
 *  compatibility : string containing feature filenames
 *  features : either NULL or pointer to array of boolean
 *  badtoken : either NULL or pointer to char[ZFS_MAXPROPLEN]
 *  badfile : either NULL or pointer to char[MAXPATHLEN]
 *
 * compatibility is NULL (unset), "", "off", "legacy", or list of
 * comma-separated filenames.
filenames should either be absolute, 4750 * or relative to: 4751 * 1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or 4752 * 2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d). 4753 * (Unset), "" or "off" => enable all features 4754 * "legacy" => disable all features 4755 * Any feature names read from files which match unames in spa_feature_table 4756 * will have the corresponding boolean set in the features array (if non-NULL). 4757 * If more than one feature set specified, only features present in *all* of 4758 * them will be set. 4759 * 4760 * An unreadable filename will be strlcpy'd to badfile (if non-NULL). 4761 * An unrecognized feature will be strlcpy'd to badtoken (if non-NULL). 4762 * 4763 * Return values: 4764 * ZPOOL_COMPATIBILITY_OK : files read and parsed ok 4765 * ZPOOL_COMPATIBILITY_READERR : file could not be opened / mmap'd 4766 * ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file 4767 * ZPOOL_COMPATIBILITY_BADWORD : file contains invalid feature name 4768 * ZPOOL_COMPATIBILITY_NOFILES : no file names found 4769 */ 4770 zpool_compat_status_t 4771 zpool_load_compat(const char *compatibility, 4772 boolean_t *features, char *badtoken, char *badfile) 4773 { 4774 int sdirfd, ddirfd, featfd; 4775 int i; 4776 struct stat fs; 4777 char *fc; /* mmap of file */ 4778 char *ps, *ls, *ws; /* strtok state */ 4779 char *file, *line, *word; 4780 char filenames[ZFS_MAXPROPLEN]; 4781 int filecount = 0; 4782 4783 /* special cases (unset), "" and "off" => enable all features */ 4784 if (compatibility == NULL || compatibility[0] == '\0' || 4785 strcmp(compatibility, ZPOOL_COMPAT_OFF) == 0) { 4786 if (features != NULL) 4787 for (i = 0; i < SPA_FEATURES; i++) 4788 features[i] = B_TRUE; 4789 return (ZPOOL_COMPATIBILITY_OK); 4790 } 4791 4792 /* Final special case "legacy" => disable all features */ 4793 if (strcmp(compatibility, ZPOOL_COMPAT_LEGACY) == 0) { 4794 if (features != NULL) 4795 for (i = 0; i < SPA_FEATURES; i++) 4796 features[i] = 
B_FALSE; 4797 return (ZPOOL_COMPATIBILITY_OK); 4798 } 4799 4800 /* 4801 * Start with all true; will be ANDed with results from each file 4802 */ 4803 if (features != NULL) 4804 for (i = 0; i < SPA_FEATURES; i++) 4805 features[i] = B_TRUE; 4806 4807 /* 4808 * We ignore errors from the directory open() 4809 * as they're only needed if the filename is relative 4810 * which will be checked during the openat(). 4811 */ 4812 #ifdef O_PATH 4813 sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, O_DIRECTORY | O_PATH); 4814 ddirfd = open(ZPOOL_DATA_COMPAT_D, O_DIRECTORY | O_PATH); 4815 #else 4816 sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, O_DIRECTORY | O_RDONLY); 4817 ddirfd = open(ZPOOL_DATA_COMPAT_D, O_DIRECTORY | O_RDONLY); 4818 #endif 4819 4820 (void) strlcpy(filenames, compatibility, ZFS_MAXPROPLEN); 4821 file = strtok_r(filenames, ",", &ps); 4822 while (file != NULL) { 4823 boolean_t features_local[SPA_FEATURES]; 4824 4825 /* try sysconfdir first, then datadir */ 4826 if ((featfd = openat(sdirfd, file, 0, O_RDONLY)) < 0) 4827 featfd = openat(ddirfd, file, 0, O_RDONLY); 4828 4829 if (featfd < 0 || fstat(featfd, &fs) < 0) { 4830 (void) close(featfd); 4831 (void) close(sdirfd); 4832 (void) close(ddirfd); 4833 if (badfile != NULL) 4834 (void) strlcpy(badfile, file, MAXPATHLEN); 4835 return (ZPOOL_COMPATIBILITY_READERR); 4836 } 4837 4838 /* Too big or too small */ 4839 if (fs.st_size < 1 || fs.st_size > ZPOOL_COMPAT_MAXSIZE) { 4840 (void) close(featfd); 4841 (void) close(sdirfd); 4842 (void) close(ddirfd); 4843 if (badfile != NULL) 4844 (void) strlcpy(badfile, file, MAXPATHLEN); 4845 return (ZPOOL_COMPATIBILITY_BADFILE); 4846 } 4847 4848 /* private mmap() so we can strtok safely */ 4849 fc = (char *)mmap(NULL, fs.st_size, 4850 PROT_READ|PROT_WRITE, MAP_PRIVATE, featfd, 0); 4851 (void) close(featfd); 4852 4853 if (fc < 0) { 4854 (void) close(sdirfd); 4855 (void) close(ddirfd); 4856 if (badfile != NULL) 4857 (void) strlcpy(badfile, file, MAXPATHLEN); 4858 return (ZPOOL_COMPATIBILITY_READERR); 
4859 } 4860 4861 /* Text file sanity check - last char should be newline */ 4862 if (fc[fs.st_size - 1] != '\n') { 4863 (void) munmap((void *) fc, fs.st_size); 4864 (void) close(sdirfd); 4865 (void) close(ddirfd); 4866 if (badfile != NULL) 4867 (void) strlcpy(badfile, file, MAXPATHLEN); 4868 return (ZPOOL_COMPATIBILITY_BADFILE); 4869 } 4870 4871 /* replace with NUL to ensure we have a delimiter */ 4872 fc[fs.st_size - 1] = '\0'; 4873 4874 for (i = 0; i < SPA_FEATURES; i++) 4875 features_local[i] = B_FALSE; 4876 4877 line = strtok_r(fc, "\n", &ls); 4878 while (line != NULL) { 4879 /* discard comments */ 4880 *(strchrnul(line, '#')) = '\0'; 4881 4882 word = strtok_r(line, ", \t", &ws); 4883 while (word != NULL) { 4884 /* Find matching feature name */ 4885 for (i = 0; i < SPA_FEATURES; i++) { 4886 zfeature_info_t *fi = 4887 &spa_feature_table[i]; 4888 if (strcmp(word, fi->fi_uname) == 0) { 4889 features_local[i] = B_TRUE; 4890 break; 4891 } 4892 } 4893 if (i == SPA_FEATURES) { 4894 if (badtoken != NULL) 4895 (void) strlcpy(badtoken, word, 4896 ZFS_MAXPROPLEN); 4897 if (badfile != NULL) 4898 (void) strlcpy(badfile, file, 4899 MAXPATHLEN); 4900 (void) munmap((void *) fc, fs.st_size); 4901 (void) close(sdirfd); 4902 (void) close(ddirfd); 4903 return (ZPOOL_COMPATIBILITY_BADWORD); 4904 } 4905 word = strtok_r(NULL, ", \t", &ws); 4906 } 4907 line = strtok_r(NULL, "\n", &ls); 4908 } 4909 (void) munmap((void *) fc, fs.st_size); 4910 if (features != NULL) { 4911 for (i = 0; i < SPA_FEATURES; i++) 4912 features[i] &= features_local[i]; 4913 } 4914 filecount++; 4915 file = strtok_r(NULL, ",", &ps); 4916 } 4917 (void) close(sdirfd); 4918 (void) close(ddirfd); 4919 if (filecount == 0) 4920 return (ZPOOL_COMPATIBILITY_NOFILES); 4921 return (ZPOOL_COMPATIBILITY_OK); 4922 } 4923