1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2017 by Delphix. All rights reserved. 25 * Copyright 2019 Joyent, Inc. 26 * Copyright 2016 Nexenta Systems, Inc. 27 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com> 28 * Copyright (c) 2017 Datto Inc. 29 * Copyright (c) 2017, Intel Corporation. 30 */ 31 32 #include <ctype.h> 33 #include <errno.h> 34 #include <devid.h> 35 #include <fcntl.h> 36 #include <libintl.h> 37 #include <stdio.h> 38 #include <stdlib.h> 39 #include <strings.h> 40 #include <unistd.h> 41 #include <libgen.h> 42 #include <sys/efi_partition.h> 43 #include <sys/vtoc.h> 44 #include <sys/zfs_ioctl.h> 45 #include <dlfcn.h> 46 47 #include "zfs_namecheck.h" 48 #include "zfs_prop.h" 49 #include "libzfs_impl.h" 50 #include "zfs_comutil.h" 51 #include "zfeature_common.h" 52 53 static int read_efi_label(nvlist_t *, diskaddr_t *, boolean_t *); 54 static boolean_t zpool_vdev_is_interior(const char *name); 55 56 #define BACKUP_SLICE "s2" 57 58 typedef struct prop_flags { 59 int create:1; /* Validate property on creation */ 60 int import:1; /* Validate property on import */ 61 } prop_flags_t; 62 63 /* 64 * ==================================================================== 65 * zpool property functions 66 * ==================================================================== 67 */ 68 69 static int 70 zpool_get_all_props(zpool_handle_t *zhp) 71 { 72 zfs_cmd_t zc = { 0 }; 73 libzfs_handle_t *hdl = zhp->zpool_hdl; 74 75 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 76 77 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 78 return (-1); 79 80 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 81 if (errno == ENOMEM) { 82 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 83 zcmd_free_nvlists(&zc); 84 return (-1); 85 } 86 } else { 87 zcmd_free_nvlists(&zc); 88 return (-1); 89 } 90 } 91 92 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 93 zcmd_free_nvlists(&zc); 94 return (-1); 95 } 96 97 zcmd_free_nvlists(&zc); 98 99 return (0); 100 } 101 102 static int 103 zpool_props_refresh(zpool_handle_t *zhp) 104 { 105 nvlist_t *old_props; 106 107 old_props = zhp->zpool_props; 108 109 if (zpool_get_all_props(zhp) != 0) 110 return (-1); 111 112 nvlist_free(old_props); 113 return (0); 114 } 115 116 static char * 117 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 118 zprop_source_t *src) 119 { 120 nvlist_t *nv, *nvl; 121 uint64_t ival; 122 char *value; 123 zprop_source_t source; 124 125 nvl = zhp->zpool_props; 126 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 127 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 128 
source = ival; 129 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 130 } else { 131 source = ZPROP_SRC_DEFAULT; 132 if ((value = (char *)zpool_prop_default_string(prop)) == NULL) 133 value = "-"; 134 } 135 136 if (src) 137 *src = source; 138 139 return (value); 140 } 141 142 uint64_t 143 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 144 { 145 nvlist_t *nv, *nvl; 146 uint64_t value; 147 zprop_source_t source; 148 149 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 150 /* 151 * zpool_get_all_props() has most likely failed because 152 * the pool is faulted, but if all we need is the top level 153 * vdev's guid then get it from the zhp config nvlist. 154 */ 155 if ((prop == ZPOOL_PROP_GUID) && 156 (nvlist_lookup_nvlist(zhp->zpool_config, 157 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 158 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 159 == 0)) { 160 return (value); 161 } 162 return (zpool_prop_default_numeric(prop)); 163 } 164 165 nvl = zhp->zpool_props; 166 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 167 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 168 source = value; 169 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 170 } else { 171 source = ZPROP_SRC_DEFAULT; 172 value = zpool_prop_default_numeric(prop); 173 } 174 175 if (src) 176 *src = source; 177 178 return (value); 179 } 180 181 /* 182 * Map VDEV STATE to printed strings. 183 */ 184 const char * 185 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 186 { 187 switch (state) { 188 case VDEV_STATE_CLOSED: 189 case VDEV_STATE_OFFLINE: 190 return (gettext("OFFLINE")); 191 case VDEV_STATE_REMOVED: 192 return (gettext("REMOVED")); 193 case VDEV_STATE_CANT_OPEN: 194 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 195 return (gettext("FAULTED")); 196 else if (aux == VDEV_AUX_SPLIT_POOL) 197 return (gettext("SPLIT")); 198 else 199 return (gettext("UNAVAIL")); 200 case VDEV_STATE_FAULTED: 201 return (gettext("FAULTED")); 202 case VDEV_STATE_DEGRADED: 203 return (gettext("DEGRADED")); 204 case VDEV_STATE_HEALTHY: 205 return (gettext("ONLINE")); 206 207 default: 208 break; 209 } 210 211 return (gettext("UNKNOWN")); 212 } 213 214 /* 215 * Map POOL STATE to printed strings. 216 */ 217 const char * 218 zpool_pool_state_to_name(pool_state_t state) 219 { 220 switch (state) { 221 case POOL_STATE_ACTIVE: 222 return (gettext("ACTIVE")); 223 case POOL_STATE_EXPORTED: 224 return (gettext("EXPORTED")); 225 case POOL_STATE_DESTROYED: 226 return (gettext("DESTROYED")); 227 case POOL_STATE_SPARE: 228 return (gettext("SPARE")); 229 case POOL_STATE_L2CACHE: 230 return (gettext("L2CACHE")); 231 case POOL_STATE_UNINITIALIZED: 232 return (gettext("UNINITIALIZED")); 233 case POOL_STATE_UNAVAIL: 234 return (gettext("UNAVAIL")); 235 case POOL_STATE_POTENTIALLY_ACTIVE: 236 return (gettext("POTENTIALLY_ACTIVE")); 237 } 238 239 return (gettext("UNKNOWN")); 240 } 241 242 /* 243 * Get a zpool property value for 'prop' and return the value in 244 * a pre-allocated buffer. 
245 */ 246 int 247 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 248 zprop_source_t *srctype, boolean_t literal) 249 { 250 uint64_t intval; 251 const char *strval; 252 zprop_source_t src = ZPROP_SRC_NONE; 253 nvlist_t *nvroot; 254 vdev_stat_t *vs; 255 uint_t vsc; 256 257 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 258 switch (prop) { 259 case ZPOOL_PROP_NAME: 260 (void) strlcpy(buf, zpool_get_name(zhp), len); 261 break; 262 263 case ZPOOL_PROP_HEALTH: 264 (void) strlcpy(buf, "FAULTED", len); 265 break; 266 267 case ZPOOL_PROP_GUID: 268 intval = zpool_get_prop_int(zhp, prop, &src); 269 (void) snprintf(buf, len, "%llu", intval); 270 break; 271 272 case ZPOOL_PROP_ALTROOT: 273 case ZPOOL_PROP_CACHEFILE: 274 case ZPOOL_PROP_COMMENT: 275 if (zhp->zpool_props != NULL || 276 zpool_get_all_props(zhp) == 0) { 277 (void) strlcpy(buf, 278 zpool_get_prop_string(zhp, prop, &src), 279 len); 280 break; 281 } 282 /* FALLTHROUGH */ 283 default: 284 (void) strlcpy(buf, "-", len); 285 break; 286 } 287 288 if (srctype != NULL) 289 *srctype = src; 290 return (0); 291 } 292 293 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 294 prop != ZPOOL_PROP_NAME) 295 return (-1); 296 297 switch (zpool_prop_get_type(prop)) { 298 case PROP_TYPE_STRING: 299 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 300 len); 301 break; 302 303 case PROP_TYPE_NUMBER: 304 intval = zpool_get_prop_int(zhp, prop, &src); 305 306 switch (prop) { 307 case ZPOOL_PROP_SIZE: 308 case ZPOOL_PROP_ALLOCATED: 309 case ZPOOL_PROP_FREE: 310 case ZPOOL_PROP_FREEING: 311 case ZPOOL_PROP_LEAKED: 312 case ZPOOL_PROP_ASHIFT: 313 if (literal) { 314 (void) snprintf(buf, len, "%llu", 315 (u_longlong_t)intval); 316 } else { 317 (void) zfs_nicenum(intval, buf, len); 318 } 319 break; 320 case ZPOOL_PROP_BOOTSIZE: 321 case ZPOOL_PROP_EXPANDSZ: 322 case ZPOOL_PROP_CHECKPOINT: 323 if (intval == 0) { 324 (void) strlcpy(buf, "-", len); 325 } else if (literal) { 326 (void) snprintf(buf, len, "%llu", 327 (u_longlong_t)intval); 328 } else { 329 (void) zfs_nicenum(intval, buf, len); 330 } 331 break; 332 case ZPOOL_PROP_CAPACITY: 333 if (literal) { 334 (void) snprintf(buf, len, "%llu", 335 (u_longlong_t)intval); 336 } else { 337 (void) snprintf(buf, len, "%llu%%", 338 (u_longlong_t)intval); 339 } 340 break; 341 case ZPOOL_PROP_FRAGMENTATION: 342 if (intval == UINT64_MAX) { 343 (void) strlcpy(buf, "-", len); 344 } else { 345 (void) snprintf(buf, len, "%llu%%", 346 (u_longlong_t)intval); 347 } 348 break; 349 case ZPOOL_PROP_DEDUPRATIO: 350 (void) snprintf(buf, len, "%llu.%02llux", 351 (u_longlong_t)(intval / 100), 352 (u_longlong_t)(intval % 100)); 353 break; 354 case ZPOOL_PROP_HEALTH: 355 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 356 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 357 verify(nvlist_lookup_uint64_array(nvroot, 358 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 359 == 0); 360 361 (void) strlcpy(buf, zpool_state_to_name(intval, 362 vs->vs_aux), len); 363 break; 364 case ZPOOL_PROP_VERSION: 365 if (intval >= SPA_VERSION_FEATURES) { 366 (void) snprintf(buf, len, "-"); 367 break; 368 } 369 /* FALLTHROUGH */ 370 default: 371 (void) snprintf(buf, len, "%llu", intval); 372 } 373 break; 374 375 case PROP_TYPE_INDEX: 376 intval = zpool_get_prop_int(zhp, prop, &src); 377 if (zpool_prop_index_to_string(prop, intval, &strval) 378 != 0) 379 return (-1); 380 (void) strlcpy(buf, strval, len); 381 break; 382 383 default: 384 abort(); 385 } 386 387 if (srctype) 388 *srctype = src; 389 390 return (0); 391 
} 392 393 /* 394 * Check if the bootfs name has the same pool name as it is set to. 395 * Assuming bootfs is a valid dataset name. 396 */ 397 static boolean_t 398 bootfs_name_valid(const char *pool, char *bootfs) 399 { 400 int len = strlen(pool); 401 if (bootfs[0] == '\0') 402 return (B_TRUE); 403 404 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 405 return (B_FALSE); 406 407 if (strncmp(pool, bootfs, len) == 0 && 408 (bootfs[len] == '/' || bootfs[len] == '\0')) 409 return (B_TRUE); 410 411 return (B_FALSE); 412 } 413 414 boolean_t 415 zpool_is_bootable(zpool_handle_t *zhp) 416 { 417 char bootfs[ZFS_MAX_DATASET_NAME_LEN]; 418 419 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 420 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-", 421 sizeof (bootfs)) != 0); 422 } 423 424 425 /* 426 * Given an nvlist of zpool properties to be set, validate that they are 427 * correct, and parse any numeric properties (index, boolean, etc) if they are 428 * specified as strings. 429 */ 430 static nvlist_t * 431 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 432 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 433 { 434 nvpair_t *elem; 435 nvlist_t *retprops; 436 zpool_prop_t prop; 437 char *strval; 438 uint64_t intval; 439 char *slash, *check; 440 struct stat64 statbuf; 441 zpool_handle_t *zhp; 442 443 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 444 (void) no_memory(hdl); 445 return (NULL); 446 } 447 448 elem = NULL; 449 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 450 const char *propname = nvpair_name(elem); 451 452 prop = zpool_name_to_prop(propname); 453 if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) { 454 int err; 455 char *fname = strchr(propname, '@') + 1; 456 457 err = zfeature_lookup_name(fname, NULL); 458 if (err != 0) { 459 ASSERT3U(err, ==, ENOENT); 460 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 461 "invalid feature '%s', '%s'"), fname, 462 propname); 463 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 464 goto error; 465 } 466 467 if (nvpair_type(elem) != DATA_TYPE_STRING) { 468 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 469 "'%s' must be a string"), propname); 470 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 471 goto error; 472 } 473 474 (void) nvpair_value_string(elem, &strval); 475 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) { 476 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 477 "property '%s' can only be set to " 478 "'enabled'"), propname); 479 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 480 goto error; 481 } 482 483 if (nvlist_add_uint64(retprops, propname, 0) != 0) { 484 (void) no_memory(hdl); 485 goto error; 486 } 487 continue; 488 } 489 490 /* 491 * Make sure this property is valid and applies to this type. 492 */ 493 if (prop == ZPOOL_PROP_INVAL) { 494 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 495 "invalid property '%s'"), propname); 496 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 497 goto error; 498 } 499 500 if (zpool_prop_readonly(prop)) { 501 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 502 "is readonly"), propname); 503 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 504 goto error; 505 } 506 507 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 508 &strval, &intval, errbuf) != 0) 509 goto error; 510 511 /* 512 * Perform additional checking for specific properties. 
513 */ 514 switch (prop) { 515 case ZPOOL_PROP_VERSION: 516 if (intval < version || 517 !SPA_VERSION_IS_SUPPORTED(intval)) { 518 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 519 "property '%s' number %d is invalid."), 520 propname, intval); 521 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 522 goto error; 523 } 524 break; 525 526 case ZPOOL_PROP_BOOTSIZE: 527 if (!flags.create) { 528 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 529 "property '%s' can only be set during pool " 530 "creation"), propname); 531 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 532 goto error; 533 } 534 break; 535 536 case ZPOOL_PROP_ASHIFT: 537 if (intval != 0 && 538 (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) { 539 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 540 "invalid '%s=%d' property: only values " 541 "between %" PRId32 " and %" PRId32 " " 542 "are allowed.\n"), 543 propname, intval, ASHIFT_MIN, ASHIFT_MAX); 544 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 545 goto error; 546 } 547 break; 548 549 case ZPOOL_PROP_BOOTFS: 550 if (flags.create || flags.import) { 551 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 552 "property '%s' cannot be set at creation " 553 "or import time"), propname); 554 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 555 goto error; 556 } 557 558 if (version < SPA_VERSION_BOOTFS) { 559 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 560 "pool must be upgraded to support " 561 "'%s' property"), propname); 562 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 563 goto error; 564 } 565 566 /* 567 * bootfs property value has to be a dataset name and 568 * the dataset has to be in the same pool as it sets to. 569 */ 570 if (!bootfs_name_valid(poolname, strval)) { 571 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 572 "is an invalid name"), strval); 573 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 574 goto error; 575 } 576 577 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 578 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 579 "could not open pool '%s'"), poolname); 580 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 581 goto error; 582 } 583 zpool_close(zhp); 584 break; 585 586 case ZPOOL_PROP_ALTROOT: 587 if (!flags.create && !flags.import) { 588 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 589 "property '%s' can only be set during pool " 590 "creation or import"), propname); 591 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 592 goto error; 593 } 594 595 if (strval[0] != '/') { 596 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 597 "bad alternate root '%s'"), strval); 598 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 599 goto error; 600 } 601 break; 602 603 case ZPOOL_PROP_CACHEFILE: 604 if (strval[0] == '\0') 605 break; 606 607 if (strcmp(strval, "none") == 0) 608 break; 609 610 if (strval[0] != '/') { 611 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 612 "property '%s' must be empty, an " 613 "absolute path, or 'none'"), propname); 614 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 615 goto error; 616 } 617 618 slash = strrchr(strval, '/'); 619 620 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 621 strcmp(slash, "/..") == 0) { 622 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 623 "'%s' is not a valid file"), strval); 624 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 625 goto error; 626 } 627 628 *slash = '\0'; 629 630 if (strval[0] != '\0' && 631 (stat64(strval, &statbuf) != 0 || 632 !S_ISDIR(statbuf.st_mode))) { 633 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 634 "'%s' is not a valid directory"), 635 strval); 636 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 637 goto error; 638 } 639 640 *slash = '/'; 641 break; 642 643 case 
ZPOOL_PROP_COMMENT: 644 for (check = strval; *check != '\0'; check++) { 645 if (!isprint(*check)) { 646 zfs_error_aux(hdl, 647 dgettext(TEXT_DOMAIN, 648 "comment may only have printable " 649 "characters")); 650 (void) zfs_error(hdl, EZFS_BADPROP, 651 errbuf); 652 goto error; 653 } 654 } 655 if (strlen(strval) > ZPROP_MAX_COMMENT) { 656 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 657 "comment must not exceed %d characters"), 658 ZPROP_MAX_COMMENT); 659 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 660 goto error; 661 } 662 break; 663 664 case ZPOOL_PROP_READONLY: 665 if (!flags.import) { 666 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 667 "property '%s' can only be set at " 668 "import time"), propname); 669 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 670 goto error; 671 } 672 break; 673 674 case ZPOOL_PROP_TNAME: 675 if (!flags.create) { 676 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 677 "property '%s' can only be set at " 678 "creation time"), propname); 679 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 680 goto error; 681 } 682 break; 683 684 case ZPOOL_PROP_MULTIHOST: 685 if (get_system_hostid() == 0) { 686 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 687 "requires a non-zero system hostid")); 688 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 689 goto error; 690 } 691 break; 692 693 default: 694 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 695 "property '%s'(%d) not defined"), propname, prop); 696 break; 697 } 698 } 699 700 return (retprops); 701 error: 702 nvlist_free(retprops); 703 return (NULL); 704 } 705 706 /* 707 * Set zpool property : propname=propval. 708 */ 709 int 710 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 711 { 712 zfs_cmd_t zc = { 0 }; 713 int ret = -1; 714 char errbuf[1024]; 715 nvlist_t *nvl = NULL; 716 nvlist_t *realprops; 717 uint64_t version; 718 prop_flags_t flags = { 0 }; 719 720 (void) snprintf(errbuf, sizeof (errbuf), 721 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 722 zhp->zpool_name); 723 724 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 725 return (no_memory(zhp->zpool_hdl)); 726 727 if (nvlist_add_string(nvl, propname, propval) != 0) { 728 nvlist_free(nvl); 729 return (no_memory(zhp->zpool_hdl)); 730 } 731 732 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 733 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 734 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 735 nvlist_free(nvl); 736 return (-1); 737 } 738 739 nvlist_free(nvl); 740 nvl = realprops; 741 742 /* 743 * Execute the corresponding ioctl() to set this property. 
744 */ 745 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 746 747 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 748 nvlist_free(nvl); 749 return (-1); 750 } 751 752 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 753 754 zcmd_free_nvlists(&zc); 755 nvlist_free(nvl); 756 757 if (ret) 758 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 759 else 760 (void) zpool_props_refresh(zhp); 761 762 return (ret); 763 } 764 765 int 766 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 767 { 768 libzfs_handle_t *hdl = zhp->zpool_hdl; 769 zprop_list_t *entry; 770 char buf[ZFS_MAXPROPLEN]; 771 nvlist_t *features = NULL; 772 zprop_list_t **last; 773 boolean_t firstexpand = (NULL == *plp); 774 775 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 776 return (-1); 777 778 last = plp; 779 while (*last != NULL) 780 last = &(*last)->pl_next; 781 782 if ((*plp)->pl_all) 783 features = zpool_get_features(zhp); 784 785 if ((*plp)->pl_all && firstexpand) { 786 for (int i = 0; i < SPA_FEATURES; i++) { 787 zprop_list_t *entry = zfs_alloc(hdl, 788 sizeof (zprop_list_t)); 789 entry->pl_prop = ZPROP_INVAL; 790 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s", 791 spa_feature_table[i].fi_uname); 792 entry->pl_width = strlen(entry->pl_user_prop); 793 entry->pl_all = B_TRUE; 794 795 *last = entry; 796 last = &entry->pl_next; 797 } 798 } 799 800 /* add any unsupported features */ 801 for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL); 802 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) { 803 char *propname; 804 boolean_t found; 805 zprop_list_t *entry; 806 807 if (zfeature_is_supported(nvpair_name(nvp))) 808 continue; 809 810 propname = zfs_asprintf(hdl, "unsupported@%s", 811 nvpair_name(nvp)); 812 813 /* 814 * Before adding the property to the list make sure that no 815 * other pool already added the same property. 816 */ 817 found = B_FALSE; 818 entry = *plp; 819 while (entry != NULL) { 820 if (entry->pl_user_prop != NULL && 821 strcmp(propname, entry->pl_user_prop) == 0) { 822 found = B_TRUE; 823 break; 824 } 825 entry = entry->pl_next; 826 } 827 if (found) { 828 free(propname); 829 continue; 830 } 831 832 entry = zfs_alloc(hdl, sizeof (zprop_list_t)); 833 entry->pl_prop = ZPROP_INVAL; 834 entry->pl_user_prop = propname; 835 entry->pl_width = strlen(entry->pl_user_prop); 836 entry->pl_all = B_TRUE; 837 838 *last = entry; 839 last = &entry->pl_next; 840 } 841 842 for (entry = *plp; entry != NULL; entry = entry->pl_next) { 843 844 if (entry->pl_fixed) 845 continue; 846 847 if (entry->pl_prop != ZPROP_INVAL && 848 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), 849 NULL, B_FALSE) == 0) { 850 if (strlen(buf) > entry->pl_width) 851 entry->pl_width = strlen(buf); 852 } 853 } 854 855 return (0); 856 } 857 858 /* 859 * Get the state for the given feature on the given ZFS pool. 860 */ 861 int 862 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf, 863 size_t len) 864 { 865 uint64_t refcount; 866 boolean_t found = B_FALSE; 867 nvlist_t *features = zpool_get_features(zhp); 868 boolean_t supported; 869 const char *feature = strchr(propname, '@') + 1; 870 871 supported = zpool_prop_feature(propname); 872 ASSERT(supported || zpool_prop_unsupported(propname)); 873 874 /* 875 * Convert from feature name to feature guid. This conversion is 876 * unecessary for unsupported@... properties because they already 877 * use guids. 
878 */ 879 if (supported) { 880 int ret; 881 spa_feature_t fid; 882 883 ret = zfeature_lookup_name(feature, &fid); 884 if (ret != 0) { 885 (void) strlcpy(buf, "-", len); 886 return (ENOTSUP); 887 } 888 feature = spa_feature_table[fid].fi_guid; 889 } 890 891 if (nvlist_lookup_uint64(features, feature, &refcount) == 0) 892 found = B_TRUE; 893 894 if (supported) { 895 if (!found) { 896 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len); 897 } else { 898 if (refcount == 0) 899 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len); 900 else 901 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len); 902 } 903 } else { 904 if (found) { 905 if (refcount == 0) { 906 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE); 907 } else { 908 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY); 909 } 910 } else { 911 (void) strlcpy(buf, "-", len); 912 return (ENOTSUP); 913 } 914 } 915 916 return (0); 917 } 918 919 /* 920 * Don't start the slice at the default block of 34; many storage 921 * devices will use a stripe width of 128k, so start there instead. 922 */ 923 #define NEW_START_BLOCK 256 924 925 /* 926 * Validate the given pool name, optionally putting an extended error message in 927 * 'buf'. 928 */ 929 boolean_t 930 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 931 { 932 namecheck_err_t why; 933 char what; 934 int ret; 935 936 ret = pool_namecheck(pool, &why, &what); 937 938 /* 939 * The rules for reserved pool names were extended at a later point. 940 * But we need to support users with existing pools that may now be 941 * invalid. So we only check for this expanded set of names during a 942 * create (or import), and only in userland. 943 */ 944 if (ret == 0 && !isopen && 945 (strncmp(pool, "mirror", 6) == 0 || 946 strncmp(pool, "raidz", 5) == 0 || 947 strncmp(pool, "spare", 5) == 0 || 948 strcmp(pool, "log") == 0)) { 949 if (hdl != NULL) 950 zfs_error_aux(hdl, 951 dgettext(TEXT_DOMAIN, "name is reserved")); 952 return (B_FALSE); 953 } 954 955 956 if (ret != 0) { 957 if (hdl != NULL) { 958 switch (why) { 959 case NAME_ERR_TOOLONG: 960 zfs_error_aux(hdl, 961 dgettext(TEXT_DOMAIN, "name is too long")); 962 break; 963 964 case NAME_ERR_INVALCHAR: 965 zfs_error_aux(hdl, 966 dgettext(TEXT_DOMAIN, "invalid character " 967 "'%c' in pool name"), what); 968 break; 969 970 case NAME_ERR_NOLETTER: 971 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 972 "name must begin with a letter")); 973 break; 974 975 case NAME_ERR_RESERVED: 976 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 977 "name is reserved")); 978 break; 979 980 case NAME_ERR_DISKLIKE: 981 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 982 "pool name is reserved")); 983 break; 984 985 case NAME_ERR_LEADING_SLASH: 986 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 987 "leading slash in name")); 988 break; 989 990 case NAME_ERR_EMPTY_COMPONENT: 991 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 992 "empty component in name")); 993 break; 994 995 case NAME_ERR_TRAILING_SLASH: 996 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 997 "trailing slash in name")); 998 break; 999 1000 case NAME_ERR_MULTIPLE_DELIMITERS: 1001 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1002 "multiple '@' and/or '#' delimiters in " 1003 "name")); 1004 break; 1005 1006 default: 1007 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1008 "(%d) not defined"), why); 1009 break; 1010 } 1011 } 1012 return (B_FALSE); 1013 } 1014 1015 return (B_TRUE); 1016 } 1017 1018 /* 1019 * Open a handle to the given pool, even if the pool is currently in the FAULTED 1020 * state. 
1021 */ 1022 zpool_handle_t * 1023 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 1024 { 1025 zpool_handle_t *zhp; 1026 boolean_t missing; 1027 1028 /* 1029 * Make sure the pool name is valid. 1030 */ 1031 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 1032 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1033 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 1034 pool); 1035 return (NULL); 1036 } 1037 1038 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1039 return (NULL); 1040 1041 zhp->zpool_hdl = hdl; 1042 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1043 1044 if (zpool_refresh_stats(zhp, &missing) != 0) { 1045 zpool_close(zhp); 1046 return (NULL); 1047 } 1048 1049 if (missing) { 1050 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 1051 (void) zfs_error_fmt(hdl, EZFS_NOENT, 1052 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 1053 zpool_close(zhp); 1054 return (NULL); 1055 } 1056 1057 return (zhp); 1058 } 1059 1060 /* 1061 * Like the above, but silent on error. Used when iterating over pools (because 1062 * the configuration cache may be out of date). 1063 */ 1064 int 1065 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 1066 { 1067 zpool_handle_t *zhp; 1068 boolean_t missing; 1069 1070 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1071 return (-1); 1072 1073 zhp->zpool_hdl = hdl; 1074 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1075 1076 if (zpool_refresh_stats(zhp, &missing) != 0) { 1077 zpool_close(zhp); 1078 return (-1); 1079 } 1080 1081 if (missing) { 1082 zpool_close(zhp); 1083 *ret = NULL; 1084 return (0); 1085 } 1086 1087 *ret = zhp; 1088 return (0); 1089 } 1090 1091 /* 1092 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 1093 * state. 1094 */ 1095 zpool_handle_t * 1096 zpool_open(libzfs_handle_t *hdl, const char *pool) 1097 { 1098 zpool_handle_t *zhp; 1099 1100 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 1101 return (NULL); 1102 1103 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 1104 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 1105 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 1106 zpool_close(zhp); 1107 return (NULL); 1108 } 1109 1110 return (zhp); 1111 } 1112 1113 /* 1114 * Close the handle. Simply frees the memory associated with the handle. 1115 */ 1116 void 1117 zpool_close(zpool_handle_t *zhp) 1118 { 1119 nvlist_free(zhp->zpool_config); 1120 nvlist_free(zhp->zpool_old_config); 1121 nvlist_free(zhp->zpool_props); 1122 free(zhp); 1123 } 1124 1125 /* 1126 * Return the name of the pool. 1127 */ 1128 const char * 1129 zpool_get_name(zpool_handle_t *zhp) 1130 { 1131 return (zhp->zpool_name); 1132 } 1133 1134 1135 /* 1136 * Return the state of the pool (ACTIVE or UNAVAILABLE) 1137 */ 1138 int 1139 zpool_get_state(zpool_handle_t *zhp) 1140 { 1141 return (zhp->zpool_state); 1142 } 1143 1144 /* 1145 * Check if vdev list contains a special vdev 1146 */ 1147 static boolean_t 1148 zpool_has_special_vdev(nvlist_t *nvroot) 1149 { 1150 nvlist_t **child; 1151 uint_t children; 1152 1153 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child, 1154 &children) == 0) { 1155 for (uint_t c = 0; c < children; c++) { 1156 char *bias; 1157 1158 if (nvlist_lookup_string(child[c], 1159 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 && 1160 strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) { 1161 return (B_TRUE); 1162 } 1163 } 1164 } 1165 return (B_FALSE); 1166 } 1167 1168 /* 1169 * Create the named pool, using the provided vdev list. 
It is assumed 1170 * that the consumer has already validated the contents of the nvlist, so we 1171 * don't have to worry about error semantics. 1172 */ 1173 int 1174 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 1175 nvlist_t *props, nvlist_t *fsprops) 1176 { 1177 zfs_cmd_t zc = { 0 }; 1178 nvlist_t *zc_fsprops = NULL; 1179 nvlist_t *zc_props = NULL; 1180 nvlist_t *hidden_args = NULL; 1181 uint8_t *wkeydata = NULL; 1182 uint_t wkeylen = 0; 1183 char msg[1024]; 1184 int ret = -1; 1185 1186 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1187 "cannot create '%s'"), pool); 1188 1189 if (!zpool_name_valid(hdl, B_FALSE, pool)) 1190 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 1191 1192 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1193 return (-1); 1194 1195 if (props) { 1196 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 1197 1198 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 1199 SPA_VERSION_1, flags, msg)) == NULL) { 1200 goto create_failed; 1201 } 1202 } 1203 1204 if (fsprops) { 1205 uint64_t zoned; 1206 char *zonestr; 1207 1208 zoned = ((nvlist_lookup_string(fsprops, 1209 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 1210 strcmp(zonestr, "on") == 0); 1211 1212 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM, 1213 fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) { 1214 goto create_failed; 1215 } 1216 1217 if (nvlist_exists(zc_fsprops, 1218 zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) && 1219 !zpool_has_special_vdev(nvroot)) { 1220 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1221 "%s property requires a special vdev"), 1222 zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)); 1223 (void) zfs_error(hdl, EZFS_BADPROP, msg); 1224 goto create_failed; 1225 } 1226 1227 if (!zc_props && 1228 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 1229 goto create_failed; 1230 } 1231 if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE, 1232 &wkeydata, &wkeylen) != 0) { 1233 (void) zfs_error(hdl, EZFS_CRYPTOFAILED, msg); 1234 goto create_failed; 1235 } 1236 if (nvlist_add_nvlist(zc_props, 1237 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 1238 goto create_failed; 1239 } 1240 if (wkeydata != NULL) { 1241 if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0) 1242 goto create_failed; 1243 1244 if (nvlist_add_uint8_array(hidden_args, "wkeydata", 1245 wkeydata, wkeylen) != 0) 1246 goto create_failed; 1247 1248 if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS, 1249 hidden_args) != 0) 1250 goto create_failed; 1251 } 1252 } 1253 1254 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 1255 goto create_failed; 1256 1257 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 1258 1259 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 1260 1261 zcmd_free_nvlists(&zc); 1262 nvlist_free(zc_props); 1263 nvlist_free(zc_fsprops); 1264 nvlist_free(hidden_args); 1265 if (wkeydata != NULL) 1266 free(wkeydata); 1267 1268 switch (errno) { 1269 case EBUSY: 1270 /* 1271 * This can happen if the user has specified the same 1272 * device multiple times. We can't reliably detect this 1273 * until we try to add it and see we already have a 1274 * label. 1275 */ 1276 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1277 "one or more vdevs refer to the same device")); 1278 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1279 1280 case ERANGE: 1281 /* 1282 * This happens if the record size is smaller or larger 1283 * than the allowed size range, or not a power of 2. 
1284 * 1285 * NOTE: although zfs_valid_proplist is called earlier, 1286 * this case may have slipped through since the 1287 * pool does not exist yet and it is therefore 1288 * impossible to read properties e.g. max blocksize 1289 * from the pool. 1290 */ 1291 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1292 "record size invalid")); 1293 return (zfs_error(hdl, EZFS_BADPROP, msg)); 1294 1295 case EOVERFLOW: 1296 /* 1297 * This occurs when one of the devices is below 1298 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1299 * device was the problem device since there's no 1300 * reliable way to determine device size from userland. 1301 */ 1302 { 1303 char buf[64]; 1304 1305 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1306 1307 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1308 "one or more devices is less than the " 1309 "minimum size (%s)"), buf); 1310 } 1311 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1312 1313 case ENOSPC: 1314 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1315 "one or more devices is out of space")); 1316 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1317 1318 default: 1319 return (zpool_standard_error(hdl, errno, msg)); 1320 } 1321 } 1322 1323 create_failed: 1324 zcmd_free_nvlists(&zc); 1325 nvlist_free(zc_props); 1326 nvlist_free(zc_fsprops); 1327 nvlist_free(hidden_args); 1328 if (wkeydata != NULL) 1329 free(wkeydata); 1330 return (ret); 1331 } 1332 1333 /* 1334 * Destroy the given pool. It is up to the caller to ensure that there are no 1335 * datasets left in the pool. 1336 */ 1337 int 1338 zpool_destroy(zpool_handle_t *zhp, const char *log_str) 1339 { 1340 zfs_cmd_t zc = { 0 }; 1341 zfs_handle_t *zfp = NULL; 1342 libzfs_handle_t *hdl = zhp->zpool_hdl; 1343 char msg[1024]; 1344 1345 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1346 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1347 return (-1); 1348 1349 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1350 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1351 1352 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1353 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1354 "cannot destroy '%s'"), zhp->zpool_name); 1355 1356 if (errno == EROFS) { 1357 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1358 "one or more devices is read only")); 1359 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1360 } else { 1361 (void) zpool_standard_error(hdl, errno, msg); 1362 } 1363 1364 if (zfp) 1365 zfs_close(zfp); 1366 return (-1); 1367 } 1368 1369 if (zfp) { 1370 remove_mountpoint(zfp); 1371 zfs_close(zfp); 1372 } 1373 1374 return (0); 1375 } 1376 1377 /* 1378 * Create a checkpoint in the given pool. 1379 */ 1380 int 1381 zpool_checkpoint(zpool_handle_t *zhp) 1382 { 1383 libzfs_handle_t *hdl = zhp->zpool_hdl; 1384 char msg[1024]; 1385 int error; 1386 1387 error = lzc_pool_checkpoint(zhp->zpool_name); 1388 if (error != 0) { 1389 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1390 "cannot checkpoint '%s'"), zhp->zpool_name); 1391 (void) zpool_standard_error(hdl, error, msg); 1392 return (-1); 1393 } 1394 1395 return (0); 1396 } 1397 1398 /* 1399 * Discard the checkpoint from the given pool. 
1400 */ 1401 int 1402 zpool_discard_checkpoint(zpool_handle_t *zhp) 1403 { 1404 libzfs_handle_t *hdl = zhp->zpool_hdl; 1405 char msg[1024]; 1406 int error; 1407 1408 error = lzc_pool_checkpoint_discard(zhp->zpool_name); 1409 if (error != 0) { 1410 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1411 "cannot discard checkpoint in '%s'"), zhp->zpool_name); 1412 (void) zpool_standard_error(hdl, error, msg); 1413 return (-1); 1414 } 1415 1416 return (0); 1417 } 1418 1419 /* 1420 * Add the given vdevs to the pool. The caller must have already performed the 1421 * necessary verification to ensure that the vdev specification is well-formed. 1422 */ 1423 int 1424 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) 1425 { 1426 zfs_cmd_t zc = { 0 }; 1427 int ret; 1428 libzfs_handle_t *hdl = zhp->zpool_hdl; 1429 char msg[1024]; 1430 nvlist_t **spares, **l2cache; 1431 uint_t nspares, nl2cache; 1432 1433 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1434 "cannot add to '%s'"), zhp->zpool_name); 1435 1436 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1437 SPA_VERSION_SPARES && 1438 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1439 &spares, &nspares) == 0) { 1440 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1441 "upgraded to add hot spares")); 1442 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1443 } 1444 1445 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1446 SPA_VERSION_L2CACHE && 1447 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1448 &l2cache, &nl2cache) == 0) { 1449 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1450 "upgraded to add cache devices")); 1451 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1452 } 1453 1454 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1455 return (-1); 1456 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1457 1458 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { 1459 switch (errno) { 1460 case EBUSY: 1461 /* 1462 * This can happen if the user has specified the same 1463 * device multiple times. We can't reliably detect this 1464 * until we try to add it and see we already have a 1465 * label. 1466 */ 1467 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1468 "one or more vdevs refer to the same device")); 1469 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1470 break; 1471 1472 case EINVAL: 1473 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1474 "invalid config; a pool with removing/removed " 1475 "vdevs does not support adding raidz vdevs")); 1476 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1477 break; 1478 1479 case EOVERFLOW: 1480 /* 1481 * This occurrs when one of the devices is below 1482 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1483 * device was the problem device since there's no 1484 * reliable way to determine device size from userland. 
1485 */ 1486 { 1487 char buf[64]; 1488 1489 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1490 1491 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1492 "device is less than the minimum " 1493 "size (%s)"), buf); 1494 } 1495 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1496 break; 1497 1498 case ENOTSUP: 1499 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1500 "pool must be upgraded to add these vdevs")); 1501 (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1502 break; 1503 1504 case EDOM: 1505 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1506 "root pool can not have multiple vdevs" 1507 " or separate logs")); 1508 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); 1509 break; 1510 1511 default: 1512 (void) zpool_standard_error(hdl, errno, msg); 1513 } 1514 1515 ret = -1; 1516 } else { 1517 ret = 0; 1518 } 1519 1520 zcmd_free_nvlists(&zc); 1521 1522 return (ret); 1523 } 1524 1525 /* 1526 * Exports the pool from the system. The caller must ensure that there are no 1527 * mounted datasets in the pool. 1528 */ 1529 static int 1530 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce, 1531 const char *log_str) 1532 { 1533 zfs_cmd_t zc = { 0 }; 1534 char msg[1024]; 1535 1536 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1537 "cannot export '%s'"), zhp->zpool_name); 1538 1539 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1540 zc.zc_cookie = force; 1541 zc.zc_guid = hardforce; 1542 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1543 1544 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1545 switch (errno) { 1546 case EXDEV: 1547 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1548 "use '-f' to override the following errors:\n" 1549 "'%s' has an active shared spare which could be" 1550 " used by other pools once '%s' is exported."), 1551 zhp->zpool_name, zhp->zpool_name); 1552 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1553 msg)); 1554 default: 1555 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1556 msg)); 1557 } 1558 } 1559 1560 return (0); 1561 } 1562 1563 int 1564 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str) 1565 { 1566 return (zpool_export_common(zhp, force, B_FALSE, log_str)); 1567 } 1568 1569 int 1570 zpool_export_force(zpool_handle_t *zhp, const char *log_str) 1571 { 1572 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str)); 1573 } 1574 1575 static void 1576 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, 1577 nvlist_t *config) 1578 { 1579 nvlist_t *nv = NULL; 1580 uint64_t rewindto; 1581 int64_t loss = -1; 1582 struct tm t; 1583 char timestr[128]; 1584 1585 if (!hdl->libzfs_printerr || config == NULL) 1586 return; 1587 1588 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1589 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) { 1590 return; 1591 } 1592 1593 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1594 return; 1595 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1596 1597 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1598 strftime(timestr, 128, 0, &t) != 0) { 1599 if (dryrun) { 1600 (void) printf(dgettext(TEXT_DOMAIN, 1601 "Would be able to return %s " 1602 "to its state as of %s.\n"), 1603 name, timestr); 1604 } else { 1605 (void) printf(dgettext(TEXT_DOMAIN, 1606 "Pool %s returned to its state as of %s.\n"), 1607 name, timestr); 1608 } 1609 if (loss > 120) { 1610 (void) printf(dgettext(TEXT_DOMAIN, 1611 "%s approximately %lld "), 1612 dryrun ? 
"Would discard" : "Discarded", 1613 (loss + 30) / 60); 1614 (void) printf(dgettext(TEXT_DOMAIN, 1615 "minutes of transactions.\n")); 1616 } else if (loss > 0) { 1617 (void) printf(dgettext(TEXT_DOMAIN, 1618 "%s approximately %lld "), 1619 dryrun ? "Would discard" : "Discarded", loss); 1620 (void) printf(dgettext(TEXT_DOMAIN, 1621 "seconds of transactions.\n")); 1622 } 1623 } 1624 } 1625 1626 void 1627 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1628 nvlist_t *config) 1629 { 1630 nvlist_t *nv = NULL; 1631 int64_t loss = -1; 1632 uint64_t edata = UINT64_MAX; 1633 uint64_t rewindto; 1634 struct tm t; 1635 char timestr[128]; 1636 1637 if (!hdl->libzfs_printerr) 1638 return; 1639 1640 if (reason >= 0) 1641 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1642 else 1643 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1644 1645 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1646 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1647 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 || 1648 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1649 goto no_info; 1650 1651 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1652 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1653 &edata); 1654 1655 (void) printf(dgettext(TEXT_DOMAIN, 1656 "Recovery is possible, but will result in some data loss.\n")); 1657 1658 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1659 strftime(timestr, 128, 0, &t) != 0) { 1660 (void) printf(dgettext(TEXT_DOMAIN, 1661 "\tReturning the pool to its state as of %s\n" 1662 "\tshould correct the problem. "), 1663 timestr); 1664 } else { 1665 (void) printf(dgettext(TEXT_DOMAIN, 1666 "\tReverting the pool to an earlier state " 1667 "should correct the problem.\n\t")); 1668 } 1669 1670 if (loss > 120) { 1671 (void) printf(dgettext(TEXT_DOMAIN, 1672 "Approximately %lld minutes of data\n" 1673 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1674 } else if (loss > 0) { 1675 (void) printf(dgettext(TEXT_DOMAIN, 1676 "Approximately %lld seconds of data\n" 1677 "\tmust be discarded, irreversibly. "), loss); 1678 } 1679 if (edata != 0 && edata != UINT64_MAX) { 1680 if (edata == 1) { 1681 (void) printf(dgettext(TEXT_DOMAIN, 1682 "After rewind, at least\n" 1683 "\tone persistent user-data error will remain. ")); 1684 } else { 1685 (void) printf(dgettext(TEXT_DOMAIN, 1686 "After rewind, several\n" 1687 "\tpersistent user-data errors will remain. ")); 1688 } 1689 } 1690 (void) printf(dgettext(TEXT_DOMAIN, 1691 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1692 reason >= 0 ? "clear" : "import", name); 1693 1694 (void) printf(dgettext(TEXT_DOMAIN, 1695 "A scrub of the pool\n" 1696 "\tis strongly recommended after recovery.\n")); 1697 return; 1698 1699 no_info: 1700 (void) printf(dgettext(TEXT_DOMAIN, 1701 "Destroy and re-create the pool from\n\ta backup source.\n")); 1702 } 1703 1704 /* 1705 * zpool_import() is a contracted interface. Should be kept the same 1706 * if possible. 1707 * 1708 * Applications should use zpool_import_props() to import a pool with 1709 * new properties value to be set. 
1710 */ 1711 int 1712 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1713 char *altroot) 1714 { 1715 nvlist_t *props = NULL; 1716 int ret; 1717 1718 if (altroot != NULL) { 1719 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1720 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1721 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1722 newname)); 1723 } 1724 1725 if (nvlist_add_string(props, 1726 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1727 nvlist_add_string(props, 1728 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1729 nvlist_free(props); 1730 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1731 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1732 newname)); 1733 } 1734 } 1735 1736 ret = zpool_import_props(hdl, config, newname, props, 1737 ZFS_IMPORT_NORMAL); 1738 nvlist_free(props); 1739 return (ret); 1740 } 1741 1742 static void 1743 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1744 int indent) 1745 { 1746 nvlist_t **child; 1747 uint_t c, children; 1748 char *vname; 1749 uint64_t is_log = 0; 1750 1751 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1752 &is_log); 1753 1754 if (name != NULL) 1755 (void) printf("\t%*s%s%s\n", indent, "", name, 1756 is_log ? " [log]" : ""); 1757 1758 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1759 &child, &children) != 0) 1760 return; 1761 1762 for (c = 0; c < children; c++) { 1763 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID); 1764 print_vdev_tree(hdl, vname, child[c], indent + 2); 1765 free(vname); 1766 } 1767 } 1768 1769 void 1770 zpool_print_unsup_feat(nvlist_t *config) 1771 { 1772 nvlist_t *nvinfo, *unsup_feat; 1773 1774 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 1775 0); 1776 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT, 1777 &unsup_feat) == 0); 1778 1779 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL; 1780 nvp = nvlist_next_nvpair(unsup_feat, nvp)) { 1781 char *desc; 1782 1783 verify(nvpair_type(nvp) == DATA_TYPE_STRING); 1784 verify(nvpair_value_string(nvp, &desc) == 0); 1785 1786 if (strlen(desc) > 0) 1787 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc); 1788 else 1789 (void) printf("\t%s\n", nvpair_name(nvp)); 1790 } 1791 } 1792 1793 /* 1794 * Import the given pool using the known configuration and a list of 1795 * properties to be set. The configuration should have come from 1796 * zpool_find_import(). The 'newname' parameters control whether the pool 1797 * is imported with a different name. 
1798 */ 1799 int 1800 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1801 nvlist_t *props, int flags) 1802 { 1803 zfs_cmd_t zc = { 0 }; 1804 zpool_load_policy_t policy; 1805 nvlist_t *nv = NULL; 1806 nvlist_t *nvinfo = NULL; 1807 nvlist_t *missing = NULL; 1808 char *thename; 1809 char *origname; 1810 int ret; 1811 int error = 0; 1812 char errbuf[1024]; 1813 1814 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1815 &origname) == 0); 1816 1817 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1818 "cannot import pool '%s'"), origname); 1819 1820 if (newname != NULL) { 1821 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1822 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1823 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1824 newname)); 1825 thename = (char *)newname; 1826 } else { 1827 thename = origname; 1828 } 1829 1830 if (props != NULL) { 1831 uint64_t version; 1832 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1833 1834 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1835 &version) == 0); 1836 1837 if ((props = zpool_valid_proplist(hdl, origname, 1838 props, version, flags, errbuf)) == NULL) 1839 return (-1); 1840 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1841 nvlist_free(props); 1842 return (-1); 1843 } 1844 nvlist_free(props); 1845 } 1846 1847 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1848 1849 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1850 &zc.zc_guid) == 0); 1851 1852 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1853 zcmd_free_nvlists(&zc); 1854 return (-1); 1855 } 1856 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1857 zcmd_free_nvlists(&zc); 1858 return (-1); 1859 } 1860 1861 zc.zc_cookie = flags; 1862 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1863 errno == ENOMEM) { 1864 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1865 zcmd_free_nvlists(&zc); 1866 return (-1); 1867 } 1868 } 1869 if (ret != 0) 1870 error = errno; 1871 1872 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1873 1874 zcmd_free_nvlists(&zc); 1875 1876 zpool_get_load_policy(config, &policy); 1877 1878 if (error) { 1879 char desc[1024]; 1880 char aux[256]; 1881 1882 /* 1883 * Dry-run failed, but we print out what success 1884 * looks like if we found a best txg 1885 */ 1886 if (policy.zlp_rewind & ZPOOL_TRY_REWIND) { 1887 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1888 B_TRUE, nv); 1889 nvlist_free(nv); 1890 return (-1); 1891 } 1892 1893 if (newname == NULL) 1894 (void) snprintf(desc, sizeof (desc), 1895 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1896 thename); 1897 else 1898 (void) snprintf(desc, sizeof (desc), 1899 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1900 origname, thename); 1901 1902 switch (error) { 1903 case ENOTSUP: 1904 if (nv != NULL && nvlist_lookup_nvlist(nv, 1905 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1906 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) { 1907 (void) printf(dgettext(TEXT_DOMAIN, "This " 1908 "pool uses the following feature(s) not " 1909 "supported by this system:\n")); 1910 zpool_print_unsup_feat(nv); 1911 if (nvlist_exists(nvinfo, 1912 ZPOOL_CONFIG_CAN_RDONLY)) { 1913 (void) printf(dgettext(TEXT_DOMAIN, 1914 "All unsupported features are only " 1915 "required for writing to the pool." 1916 "\nThe pool can be imported using " 1917 "'-o readonly=on'.\n")); 1918 } 1919 } 1920 /* 1921 * Unsupported version. 
1922 */ 1923 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1924 break; 1925 1926 case EREMOTEIO: 1927 if (nv != NULL && nvlist_lookup_nvlist(nv, 1928 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) { 1929 char *hostname = "<unknown>"; 1930 uint64_t hostid = 0; 1931 mmp_state_t mmp_state; 1932 1933 mmp_state = fnvlist_lookup_uint64(nvinfo, 1934 ZPOOL_CONFIG_MMP_STATE); 1935 1936 if (nvlist_exists(nvinfo, 1937 ZPOOL_CONFIG_MMP_HOSTNAME)) 1938 hostname = fnvlist_lookup_string(nvinfo, 1939 ZPOOL_CONFIG_MMP_HOSTNAME); 1940 1941 if (nvlist_exists(nvinfo, 1942 ZPOOL_CONFIG_MMP_HOSTID)) 1943 hostid = fnvlist_lookup_uint64(nvinfo, 1944 ZPOOL_CONFIG_MMP_HOSTID); 1945 1946 if (mmp_state == MMP_STATE_ACTIVE) { 1947 (void) snprintf(aux, sizeof (aux), 1948 dgettext(TEXT_DOMAIN, "pool is imp" 1949 "orted on host '%s' (hostid=%lx).\n" 1950 "Export the pool on the other " 1951 "system, then run 'zpool import'."), 1952 hostname, (unsigned long) hostid); 1953 } else if (mmp_state == MMP_STATE_NO_HOSTID) { 1954 (void) snprintf(aux, sizeof (aux), 1955 dgettext(TEXT_DOMAIN, "pool has " 1956 "the multihost property on and " 1957 "the\nsystem's hostid is not " 1958 "set.\n")); 1959 } 1960 1961 (void) zfs_error_aux(hdl, aux); 1962 } 1963 (void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc); 1964 break; 1965 1966 case EINVAL: 1967 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1968 break; 1969 1970 case EROFS: 1971 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1972 "one or more devices is read only")); 1973 (void) zfs_error(hdl, EZFS_BADDEV, desc); 1974 break; 1975 1976 case ENXIO: 1977 if (nv && nvlist_lookup_nvlist(nv, 1978 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1979 nvlist_lookup_nvlist(nvinfo, 1980 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1981 (void) printf(dgettext(TEXT_DOMAIN, 1982 "The devices below are missing or " 1983 "corrupted, use '-m' to import the pool " 1984 "anyway:\n")); 1985 print_vdev_tree(hdl, NULL, missing, 2); 1986 (void) printf("\n"); 1987 } 1988 (void) zpool_standard_error(hdl, error, desc); 1989 break; 1990 1991 case EEXIST: 1992 (void) zpool_standard_error(hdl, error, desc); 1993 break; 1994 case ENAMETOOLONG: 1995 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1996 "new name of at least one dataset is longer than " 1997 "the maximum allowable length")); 1998 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc); 1999 break; 2000 default: 2001 (void) zpool_standard_error(hdl, error, desc); 2002 zpool_explain_recover(hdl, 2003 newname ? origname : thename, -error, nv); 2004 break; 2005 } 2006 2007 nvlist_free(nv); 2008 ret = -1; 2009 } else { 2010 zpool_handle_t *zhp; 2011 2012 /* 2013 * This should never fail, but play it safe anyway. 2014 */ 2015 if (zpool_open_silent(hdl, thename, &zhp) != 0) 2016 ret = -1; 2017 else if (zhp != NULL) 2018 zpool_close(zhp); 2019 if (policy.zlp_rewind & 2020 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 2021 zpool_rewind_exclaim(hdl, newname ? origname : thename, 2022 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv); 2023 } 2024 nvlist_free(nv); 2025 return (0); 2026 } 2027 2028 return (ret); 2029 } 2030 2031 /* 2032 * Translate vdev names to guids. If a vdev_path is determined to be 2033 * unsuitable then a vd_errlist is allocated and the vdev path and errno 2034 * are added to it. 
2035 */ 2036 static int 2037 zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds, 2038 nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist) 2039 { 2040 nvlist_t *errlist = NULL; 2041 int error = 0; 2042 2043 for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL; 2044 elem = nvlist_next_nvpair(vds, elem)) { 2045 boolean_t spare, cache; 2046 2047 char *vd_path = nvpair_name(elem); 2048 nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache, 2049 NULL); 2050 2051 if ((tgt == NULL) || cache || spare) { 2052 if (errlist == NULL) { 2053 errlist = fnvlist_alloc(); 2054 error = EINVAL; 2055 } 2056 2057 uint64_t err = (tgt == NULL) ? EZFS_NODEVICE : 2058 (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE); 2059 fnvlist_add_int64(errlist, vd_path, err); 2060 continue; 2061 } 2062 2063 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 2064 fnvlist_add_uint64(vdev_guids, vd_path, guid); 2065 2066 char msg[MAXNAMELEN]; 2067 (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid); 2068 fnvlist_add_string(guids_to_paths, msg, vd_path); 2069 } 2070 2071 if (error != 0) { 2072 verify(errlist != NULL); 2073 if (vd_errlist != NULL) 2074 *vd_errlist = errlist; 2075 else 2076 fnvlist_free(errlist); 2077 } 2078 2079 return (error); 2080 } 2081 2082 /* 2083 * Scan the pool. 2084 */ 2085 int 2086 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd) 2087 { 2088 zfs_cmd_t zc = { 0 }; 2089 char msg[1024]; 2090 int err; 2091 libzfs_handle_t *hdl = zhp->zpool_hdl; 2092 2093 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2094 zc.zc_cookie = func; 2095 zc.zc_flags = cmd; 2096 2097 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0) 2098 return (0); 2099 2100 err = errno; 2101 2102 /* ECANCELED on a scrub means we resumed a paused scrub */ 2103 if (err == ECANCELED && func == POOL_SCAN_SCRUB && 2104 cmd == POOL_SCRUB_NORMAL) 2105 return (0); 2106 2107 if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL) 2108 return (0); 2109 2110 if (func == POOL_SCAN_SCRUB) { 2111 if (cmd == POOL_SCRUB_PAUSE) { 2112 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2113 "cannot pause scrubbing %s"), zc.zc_name); 2114 } else { 2115 assert(cmd == POOL_SCRUB_NORMAL); 2116 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2117 "cannot scrub %s"), zc.zc_name); 2118 } 2119 } else if (func == POOL_SCAN_RESILVER) { 2120 assert(cmd == POOL_SCRUB_NORMAL); 2121 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2122 "cannot restart resilver on %s"), zc.zc_name); 2123 } else if (func == POOL_SCAN_NONE) { 2124 (void) snprintf(msg, sizeof (msg), 2125 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 2126 zc.zc_name); 2127 } else { 2128 assert(!"unexpected result"); 2129 } 2130 2131 if (err == EBUSY) { 2132 nvlist_t *nvroot; 2133 pool_scan_stat_t *ps = NULL; 2134 uint_t psc; 2135 2136 verify(nvlist_lookup_nvlist(zhp->zpool_config, 2137 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 2138 (void) nvlist_lookup_uint64_array(nvroot, 2139 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 2140 if (ps && ps->pss_func == POOL_SCAN_SCRUB) { 2141 if (cmd == POOL_SCRUB_PAUSE) 2142 return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg)); 2143 else 2144 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 2145 } else { 2146 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 2147 } 2148 } else if (err == ENOENT) { 2149 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 2150 } else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) { 2151 return (zfs_error(hdl, 
EZFS_NO_RESILVER_DEFER, msg)); 2152 } else { 2153 return (zpool_standard_error(hdl, err, msg)); 2154 } 2155 } 2156 2157 static int 2158 xlate_init_err(int err) 2159 { 2160 switch (err) { 2161 case ENODEV: 2162 return (EZFS_NODEVICE); 2163 case EINVAL: 2164 case EROFS: 2165 return (EZFS_BADDEV); 2166 case EBUSY: 2167 return (EZFS_INITIALIZING); 2168 case ESRCH: 2169 return (EZFS_NO_INITIALIZE); 2170 } 2171 return (err); 2172 } 2173 2174 /* 2175 * Begin, suspend, or cancel the initialization (initializing of all free 2176 * blocks) for the given vdevs in the given pool. 2177 */ 2178 int 2179 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type, 2180 nvlist_t *vds) 2181 { 2182 char msg[1024]; 2183 int err; 2184 2185 nvlist_t *vdev_guids = fnvlist_alloc(); 2186 nvlist_t *guids_to_paths = fnvlist_alloc(); 2187 nvlist_t *vd_errlist = NULL; 2188 nvlist_t *errlist; 2189 nvpair_t *elem; 2190 2191 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids, 2192 guids_to_paths, &vd_errlist); 2193 2194 if (err == 0) { 2195 err = lzc_initialize(zhp->zpool_name, cmd_type, 2196 vdev_guids, &errlist); 2197 if (err == 0) { 2198 fnvlist_free(vdev_guids); 2199 fnvlist_free(guids_to_paths); 2200 return (0); 2201 } 2202 2203 if (errlist != NULL) { 2204 vd_errlist = fnvlist_lookup_nvlist(errlist, 2205 ZPOOL_INITIALIZE_VDEVS); 2206 } 2207 2208 (void) snprintf(msg, sizeof (msg), 2209 dgettext(TEXT_DOMAIN, "operation failed")); 2210 } else { 2211 verify(vd_errlist != NULL); 2212 } 2213 2214 for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL; 2215 elem = nvlist_next_nvpair(vd_errlist, elem)) { 2216 int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem)); 2217 char *path; 2218 2219 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem), 2220 &path) != 0) 2221 path = nvpair_name(elem); 2222 2223 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error, 2224 "cannot initialize '%s'", path); 2225 } 2226 2227 fnvlist_free(vdev_guids); 2228 fnvlist_free(guids_to_paths); 2229 2230 if (vd_errlist != NULL) { 2231 fnvlist_free(vd_errlist); 2232 return (-1); 2233 } 2234 2235 return (zpool_standard_error(zhp->zpool_hdl, err, msg)); 2236 } 2237 2238 static int 2239 xlate_trim_err(int err) 2240 { 2241 switch (err) { 2242 case ENODEV: 2243 return (EZFS_NODEVICE); 2244 case EINVAL: 2245 case EROFS: 2246 return (EZFS_BADDEV); 2247 case EBUSY: 2248 return (EZFS_TRIMMING); 2249 case ESRCH: 2250 return (EZFS_NO_TRIM); 2251 case EOPNOTSUPP: 2252 return (EZFS_TRIM_NOTSUP); 2253 } 2254 return (err); 2255 } 2256 2257 /* 2258 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for 2259 * the given vdevs in the given pool. 
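 *
 * A minimal caller sketch (hedged: POOL_TRIM_START and the zero-filled
 * trimflags_t are assumptions about the caller's setup, and error
 * handling is omitted):
 *
 *	trimflags_t flags = { 0 };
 *	nvlist_t *vds = fnvlist_alloc();
 *	fnvlist_add_boolean(vds, "c0t1d0");
 *	(void) zpool_trim(zhp, POOL_TRIM_START, vds, &flags);
 *	fnvlist_free(vds);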
2260 */ 2261 int 2262 zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds, 2263 trimflags_t *trim_flags) 2264 { 2265 char msg[1024]; 2266 int err; 2267 2268 nvlist_t *vdev_guids = fnvlist_alloc(); 2269 nvlist_t *guids_to_paths = fnvlist_alloc(); 2270 nvlist_t *vd_errlist = NULL; 2271 nvlist_t *errlist; 2272 nvpair_t *elem; 2273 2274 err = zpool_translate_vdev_guids(zhp, vds, vdev_guids, 2275 guids_to_paths, &vd_errlist); 2276 if (err == 0) { 2277 err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate, 2278 trim_flags->secure, vdev_guids, &errlist); 2279 if (err == 0) { 2280 fnvlist_free(vdev_guids); 2281 fnvlist_free(guids_to_paths); 2282 return (0); 2283 } 2284 2285 if (errlist != NULL) { 2286 vd_errlist = fnvlist_lookup_nvlist(errlist, 2287 ZPOOL_TRIM_VDEVS); 2288 } 2289 2290 (void) snprintf(msg, sizeof (msg), 2291 dgettext(TEXT_DOMAIN, "operation failed")); 2292 } else { 2293 verify(vd_errlist != NULL); 2294 } 2295 2296 for (elem = nvlist_next_nvpair(vd_errlist, NULL); 2297 elem != NULL; elem = nvlist_next_nvpair(vd_errlist, elem)) { 2298 int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem)); 2299 char *path; 2300 /* 2301 * If only the pool was specified, and it was not a secure 2302 * trim then suppress warnings for individual vdevs which 2303 * do not support trimming. 2304 */ 2305 if (vd_error == EZFS_TRIM_NOTSUP && 2306 trim_flags->fullpool && 2307 !trim_flags->secure) { 2308 continue; 2309 } 2310 2311 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem), 2312 &path) != 0) 2313 path = nvpair_name(elem); 2314 2315 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error, 2316 "cannot trim '%s'", path); 2317 } 2318 2319 fnvlist_free(vdev_guids); 2320 fnvlist_free(guids_to_paths); 2321 2322 if (vd_errlist != NULL) { 2323 fnvlist_free(vd_errlist); 2324 return (-1); 2325 } 2326 2327 return (zpool_standard_error(zhp->zpool_hdl, err, msg)); 2328 } 2329 2330 /* 2331 * This provides a very minimal check whether a given string is likely a 2332 * c#t#d# style string. Users of this are expected to do their own 2333 * verification of the s# part. 2334 */ 2335 #define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 2336 2337 /* 2338 * More elaborate version for ones which may start with "/dev/dsk/" 2339 * and the like. 2340 */ 2341 static int 2342 ctd_check_path(char *str) 2343 { 2344 /* 2345 * If it starts with a slash, check the last component. 2346 */ 2347 if (str && str[0] == '/') { 2348 char *tmp = strrchr(str, '/'); 2349 2350 /* 2351 * If it ends in "/old", check the second-to-last 2352 * component of the string instead. 2353 */ 2354 if (tmp != str && strcmp(tmp, "/old") == 0) { 2355 for (tmp--; *tmp != '/'; tmp--) 2356 ; 2357 } 2358 str = tmp + 1; 2359 } 2360 return (CTD_CHECK(str)); 2361 } 2362 2363 /* 2364 * Find a vdev that matches the search criteria specified. We use the 2365 * the nvpair name to determine how we should look for the device. 2366 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 2367 * spare; but FALSE if its an INUSE spare. 
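 *
 * The 'search' nvlist carries a single pair whose name selects the
 * matching strategy used below; a sketch of a by-type lookup:
 *
 *	nvlist_t *search = fnvlist_alloc();
 *	fnvlist_add_string(search, ZPOOL_CONFIG_TYPE, "mirror-4");
 *	ret = vdev_to_nvlist_iter(nvroot, search, &spare, &l2cache, &log);
 *	fnvlist_free(search);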
2368 */ 2369 static nvlist_t * 2370 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare, 2371 boolean_t *l2cache, boolean_t *log) 2372 { 2373 uint_t c, children; 2374 nvlist_t **child; 2375 nvlist_t *ret; 2376 uint64_t is_log; 2377 char *srchkey; 2378 nvpair_t *pair = nvlist_next_nvpair(search, NULL); 2379 2380 /* Nothing to look for */ 2381 if (search == NULL || pair == NULL) 2382 return (NULL); 2383 2384 /* Obtain the key we will use to search */ 2385 srchkey = nvpair_name(pair); 2386 2387 switch (nvpair_type(pair)) { 2388 case DATA_TYPE_UINT64: 2389 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) { 2390 uint64_t srchval, theguid; 2391 2392 verify(nvpair_value_uint64(pair, &srchval) == 0); 2393 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 2394 &theguid) == 0); 2395 if (theguid == srchval) 2396 return (nv); 2397 } 2398 break; 2399 2400 case DATA_TYPE_STRING: { 2401 char *srchval, *val; 2402 2403 verify(nvpair_value_string(pair, &srchval) == 0); 2404 if (nvlist_lookup_string(nv, srchkey, &val) != 0) 2405 break; 2406 2407 /* 2408 * Search for the requested value. Special cases: 2409 * 2410 * - ZPOOL_CONFIG_PATH for whole disk entries. To support 2411 * UEFI boot, these end in "s0" or "s0/old" or "s1" or 2412 * "s1/old". The "s0" or "s1" part is hidden from the user, 2413 * but included in the string, so this matches around it. 2414 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE). 2415 * 2416 * Otherwise, all other searches are simple string compares. 2417 */ 2418 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && 2419 ctd_check_path(val)) { 2420 uint64_t wholedisk = 0; 2421 2422 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 2423 &wholedisk); 2424 if (wholedisk) { 2425 int slen = strlen(srchval); 2426 int vlen = strlen(val); 2427 2428 if (slen != vlen - 2) 2429 break; 2430 2431 /* 2432 * make_leaf_vdev() should only set 2433 * wholedisk for ZPOOL_CONFIG_PATHs which 2434 * will include "/dev/dsk/", giving plenty of 2435 * room for the indices used next. 2436 */ 2437 ASSERT(vlen >= 6); 2438 2439 /* 2440 * strings identical except trailing "s0" 2441 */ 2442 if ((strcmp(&val[vlen - 2], "s0") == 0 || 2443 strcmp(&val[vlen - 2], "s1") == 0) && 2444 strncmp(srchval, val, slen) == 0) 2445 return (nv); 2446 2447 /* 2448 * strings identical except trailing "s0/old" 2449 */ 2450 if ((strcmp(&val[vlen - 6], "s0/old") == 0 || 2451 strcmp(&val[vlen - 6], "s1/old") == 0) && 2452 strcmp(&srchval[slen - 4], "/old") == 0 && 2453 strncmp(srchval, val, slen - 4) == 0) 2454 return (nv); 2455 2456 break; 2457 } 2458 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 2459 char *type, *idx, *end, *p; 2460 uint64_t id, vdev_id; 2461 2462 /* 2463 * Determine our vdev type, keeping in mind 2464 * that the srchval is composed of a type and 2465 * vdev id pair (i.e. mirror-4). 2466 */ 2467 if ((type = strdup(srchval)) == NULL) 2468 return (NULL); 2469 2470 if ((p = strrchr(type, '-')) == NULL) { 2471 free(type); 2472 break; 2473 } 2474 idx = p + 1; 2475 *p = '\0'; 2476 2477 /* 2478 * If the types don't match then keep looking. 2479 */ 2480 if (strncmp(val, type, strlen(val)) != 0) { 2481 free(type); 2482 break; 2483 } 2484 2485 verify(zpool_vdev_is_interior(type)); 2486 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 2487 &id) == 0); 2488 2489 errno = 0; 2490 vdev_id = strtoull(idx, &end, 10); 2491 2492 free(type); 2493 if (errno != 0) 2494 return (NULL); 2495 2496 /* 2497 * Now verify that we have the correct vdev id. 
2498 */ 2499 if (vdev_id == id) 2500 return (nv); 2501 } 2502 2503 /* 2504 * Common case 2505 */ 2506 if (strcmp(srchval, val) == 0) 2507 return (nv); 2508 break; 2509 } 2510 2511 default: 2512 break; 2513 } 2514 2515 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2516 &child, &children) != 0) 2517 return (NULL); 2518 2519 for (c = 0; c < children; c++) { 2520 if ((ret = vdev_to_nvlist_iter(child[c], search, 2521 avail_spare, l2cache, NULL)) != NULL) { 2522 /* 2523 * The 'is_log' value is only set for the toplevel 2524 * vdev, not the leaf vdevs. So we always lookup the 2525 * log device from the root of the vdev tree (where 2526 * 'log' is non-NULL). 2527 */ 2528 if (log != NULL && 2529 nvlist_lookup_uint64(child[c], 2530 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 2531 is_log) { 2532 *log = B_TRUE; 2533 } 2534 return (ret); 2535 } 2536 } 2537 2538 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2539 &child, &children) == 0) { 2540 for (c = 0; c < children; c++) { 2541 if ((ret = vdev_to_nvlist_iter(child[c], search, 2542 avail_spare, l2cache, NULL)) != NULL) { 2543 *avail_spare = B_TRUE; 2544 return (ret); 2545 } 2546 } 2547 } 2548 2549 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2550 &child, &children) == 0) { 2551 for (c = 0; c < children; c++) { 2552 if ((ret = vdev_to_nvlist_iter(child[c], search, 2553 avail_spare, l2cache, NULL)) != NULL) { 2554 *l2cache = B_TRUE; 2555 return (ret); 2556 } 2557 } 2558 } 2559 2560 return (NULL); 2561 } 2562 2563 /* 2564 * Given a physical path (minus the "/devices" prefix), find the 2565 * associated vdev. 2566 */ 2567 nvlist_t * 2568 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 2569 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 2570 { 2571 nvlist_t *search, *nvroot, *ret; 2572 2573 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2574 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 2575 2576 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2577 &nvroot) == 0); 2578 2579 *avail_spare = B_FALSE; 2580 *l2cache = B_FALSE; 2581 if (log != NULL) 2582 *log = B_FALSE; 2583 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2584 nvlist_free(search); 2585 2586 return (ret); 2587 } 2588 2589 /* 2590 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 
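 * Names that match include "mirror-2", "raidz1-0", "spare-1" and
 * "replacing-3"; a leaf name such as "c0t0d0" does not.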
2591 */ 2592 static boolean_t 2593 zpool_vdev_is_interior(const char *name) 2594 { 2595 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2596 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 || 2597 strncmp(name, 2598 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 || 2599 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2600 return (B_TRUE); 2601 return (B_FALSE); 2602 } 2603 2604 nvlist_t * 2605 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2606 boolean_t *l2cache, boolean_t *log) 2607 { 2608 char buf[MAXPATHLEN]; 2609 char *end; 2610 nvlist_t *nvroot, *search, *ret; 2611 uint64_t guid; 2612 2613 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2614 2615 guid = strtoull(path, &end, 10); 2616 if (guid != 0 && *end == '\0') { 2617 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2618 } else if (zpool_vdev_is_interior(path)) { 2619 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2620 } else if (path[0] != '/') { 2621 (void) snprintf(buf, sizeof (buf), "%s/%s", ZFS_DISK_ROOT, 2622 path); 2623 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2624 } else { 2625 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2626 } 2627 2628 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2629 &nvroot) == 0); 2630 2631 *avail_spare = B_FALSE; 2632 *l2cache = B_FALSE; 2633 if (log != NULL) 2634 *log = B_FALSE; 2635 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2636 nvlist_free(search); 2637 2638 return (ret); 2639 } 2640 2641 static int 2642 vdev_is_online(nvlist_t *nv) 2643 { 2644 uint64_t ival; 2645 2646 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2647 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2648 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2649 return (0); 2650 2651 return (1); 2652 } 2653 2654 /* 2655 * Helper function for zpool_get_physpaths(). 2656 */ 2657 static int 2658 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2659 size_t *bytes_written) 2660 { 2661 size_t bytes_left, pos, rsz; 2662 char *tmppath; 2663 const char *format; 2664 2665 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2666 &tmppath) != 0) 2667 return (EZFS_NODEVICE); 2668 2669 pos = *bytes_written; 2670 bytes_left = physpath_size - pos; 2671 format = (pos == 0) ? "%s" : " %s"; 2672 2673 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2674 *bytes_written += rsz; 2675 2676 if (rsz >= bytes_left) { 2677 /* if physpath was not copied properly, clear it */ 2678 if (bytes_left != 0) { 2679 physpath[pos] = 0; 2680 } 2681 return (EZFS_NOSPC); 2682 } 2683 return (0); 2684 } 2685 2686 static int 2687 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2688 size_t *rsz, boolean_t is_spare) 2689 { 2690 char *type; 2691 int ret; 2692 2693 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2694 return (EZFS_INVALCONFIG); 2695 2696 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2697 /* 2698 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2699 * For a spare vdev, we only want to boot from the active 2700 * spare device. 
2701 */ 2702 if (is_spare) { 2703 uint64_t spare = 0; 2704 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2705 &spare); 2706 if (!spare) 2707 return (EZFS_INVALCONFIG); 2708 } 2709 2710 if (vdev_is_online(nv)) { 2711 if ((ret = vdev_get_one_physpath(nv, physpath, 2712 phypath_size, rsz)) != 0) 2713 return (ret); 2714 } 2715 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2716 strcmp(type, VDEV_TYPE_RAIDZ) == 0 || 2717 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2718 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2719 nvlist_t **child; 2720 uint_t count; 2721 int i, ret; 2722 2723 if (nvlist_lookup_nvlist_array(nv, 2724 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2725 return (EZFS_INVALCONFIG); 2726 2727 for (i = 0; i < count; i++) { 2728 ret = vdev_get_physpaths(child[i], physpath, 2729 phypath_size, rsz, is_spare); 2730 if (ret == EZFS_NOSPC) 2731 return (ret); 2732 } 2733 } 2734 2735 return (EZFS_POOL_INVALARG); 2736 } 2737 2738 /* 2739 * Get phys_path for a root pool config. 2740 * Return 0 on success; non-zero on failure. 2741 */ 2742 static int 2743 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2744 { 2745 size_t rsz; 2746 nvlist_t *vdev_root; 2747 nvlist_t **child; 2748 uint_t count; 2749 char *type; 2750 2751 rsz = 0; 2752 2753 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2754 &vdev_root) != 0) 2755 return (EZFS_INVALCONFIG); 2756 2757 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2758 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2759 &child, &count) != 0) 2760 return (EZFS_INVALCONFIG); 2761 2762 /* 2763 * root pool can only have a single top-level vdev. 2764 */ 2765 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1) 2766 return (EZFS_POOL_INVALARG); 2767 2768 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2769 B_FALSE); 2770 2771 /* No online devices */ 2772 if (rsz == 0) 2773 return (EZFS_NODEVICE); 2774 2775 return (0); 2776 } 2777 2778 /* 2779 * Get phys_path for a root pool 2780 * Return 0 on success; non-zero on failure. 2781 */ 2782 int 2783 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2784 { 2785 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2786 phypath_size)); 2787 } 2788 2789 /* 2790 * If the device has being dynamically expanded then we need to relabel 2791 * the disk to use the new unallocated space. 2792 */ 2793 static int 2794 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2795 { 2796 char path[MAXPATHLEN]; 2797 char errbuf[1024]; 2798 int fd, error; 2799 int (*_efi_use_whole_disk)(int); 2800 2801 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2802 "efi_use_whole_disk")) == NULL) 2803 return (-1); 2804 2805 (void) snprintf(path, sizeof (path), "%s/%s", ZFS_RDISK_ROOT, name); 2806 2807 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2808 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2809 "relabel '%s': unable to open device"), name); 2810 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2811 } 2812 2813 /* 2814 * It's possible that we might encounter an error if the device 2815 * does not have any unallocated space left. If so, we simply 2816 * ignore that error and continue on. 
2817 */ 2818 error = _efi_use_whole_disk(fd); 2819 (void) close(fd); 2820 if (error && error != VT_ENOSPC) { 2821 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2822 "relabel '%s': unable to read disk capacity"), name); 2823 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2824 } 2825 return (0); 2826 } 2827 2828 /* 2829 * Bring the specified vdev online. The 'flags' parameter is a set of the 2830 * ZFS_ONLINE_* flags. 2831 */ 2832 int 2833 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2834 vdev_state_t *newstate) 2835 { 2836 zfs_cmd_t zc = { 0 }; 2837 char msg[1024]; 2838 char *pathname; 2839 nvlist_t *tgt; 2840 boolean_t avail_spare, l2cache, islog; 2841 libzfs_handle_t *hdl = zhp->zpool_hdl; 2842 2843 if (flags & ZFS_ONLINE_EXPAND) { 2844 (void) snprintf(msg, sizeof (msg), 2845 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2846 } else { 2847 (void) snprintf(msg, sizeof (msg), 2848 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2849 } 2850 2851 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2852 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2853 &islog)) == NULL) 2854 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2855 2856 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2857 2858 if (avail_spare) 2859 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2860 2861 if ((flags & ZFS_ONLINE_EXPAND || 2862 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) && 2863 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) { 2864 uint64_t wholedisk = 0; 2865 2866 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2867 &wholedisk); 2868 2869 /* 2870 * XXX - L2ARC 1.0 devices can't support expansion. 2871 */ 2872 if (l2cache) { 2873 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2874 "cannot expand cache devices")); 2875 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2876 } 2877 2878 if (wholedisk) { 2879 pathname += strlen(ZFS_DISK_ROOT) + 1; 2880 (void) zpool_relabel_disk(hdl, pathname); 2881 } 2882 } 2883 2884 zc.zc_cookie = VDEV_STATE_ONLINE; 2885 zc.zc_obj = flags; 2886 2887 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2888 if (errno == EINVAL) { 2889 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2890 "from this pool into a new one. Use '%s' " 2891 "instead"), "zpool detach"); 2892 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2893 } 2894 return (zpool_standard_error(hdl, errno, msg)); 2895 } 2896 2897 *newstate = zc.zc_cookie; 2898 return (0); 2899 } 2900 2901 /* 2902 * Take the specified vdev offline 2903 */ 2904 int 2905 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2906 { 2907 zfs_cmd_t zc = { 0 }; 2908 char msg[1024]; 2909 nvlist_t *tgt; 2910 boolean_t avail_spare, l2cache; 2911 libzfs_handle_t *hdl = zhp->zpool_hdl; 2912 2913 (void) snprintf(msg, sizeof (msg), 2914 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2915 2916 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2917 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2918 NULL)) == NULL) 2919 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2920 2921 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2922 2923 if (avail_spare) 2924 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2925 2926 zc.zc_cookie = VDEV_STATE_OFFLINE; 2927 zc.zc_obj = istmp ? 
ZFS_OFFLINE_TEMPORARY : 0; 2928 2929 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2930 return (0); 2931 2932 switch (errno) { 2933 case EBUSY: 2934 2935 /* 2936 * There are no other replicas of this device. 2937 */ 2938 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2939 2940 case EEXIST: 2941 /* 2942 * The log device has unplayed logs 2943 */ 2944 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2945 2946 default: 2947 return (zpool_standard_error(hdl, errno, msg)); 2948 } 2949 } 2950 2951 /* 2952 * Mark the given vdev faulted. 2953 */ 2954 int 2955 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2956 { 2957 zfs_cmd_t zc = { 0 }; 2958 char msg[1024]; 2959 libzfs_handle_t *hdl = zhp->zpool_hdl; 2960 2961 (void) snprintf(msg, sizeof (msg), 2962 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2963 2964 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2965 zc.zc_guid = guid; 2966 zc.zc_cookie = VDEV_STATE_FAULTED; 2967 zc.zc_obj = aux; 2968 2969 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2970 return (0); 2971 2972 switch (errno) { 2973 case EBUSY: 2974 2975 /* 2976 * There are no other replicas of this device. 2977 */ 2978 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2979 2980 default: 2981 return (zpool_standard_error(hdl, errno, msg)); 2982 } 2983 2984 } 2985 2986 /* 2987 * Mark the given vdev degraded. 2988 */ 2989 int 2990 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2991 { 2992 zfs_cmd_t zc = { 0 }; 2993 char msg[1024]; 2994 libzfs_handle_t *hdl = zhp->zpool_hdl; 2995 2996 (void) snprintf(msg, sizeof (msg), 2997 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2998 2999 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3000 zc.zc_guid = guid; 3001 zc.zc_cookie = VDEV_STATE_DEGRADED; 3002 zc.zc_obj = aux; 3003 3004 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 3005 return (0); 3006 3007 return (zpool_standard_error(hdl, errno, msg)); 3008 } 3009 3010 /* 3011 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 3012 * a hot spare. 3013 */ 3014 static boolean_t 3015 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 3016 { 3017 nvlist_t **child; 3018 uint_t c, children; 3019 char *type; 3020 3021 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 3022 &children) == 0) { 3023 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 3024 &type) == 0); 3025 3026 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 3027 children == 2 && child[which] == tgt) 3028 return (B_TRUE); 3029 3030 for (c = 0; c < children; c++) 3031 if (is_replacing_spare(child[c], tgt, which)) 3032 return (B_TRUE); 3033 } 3034 3035 return (B_FALSE); 3036 } 3037 3038 /* 3039 * Attach new_disk (fully described by nvroot) to old_disk. 3040 * If 'replacing' is specified, the new disk will replace the old one. 
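 *
 * A hedged caller sketch (construction of 'nvroot' is elided; it must
 * describe exactly one new disk, as enforced below):
 *
 *	if (zpool_vdev_attach(zhp, "c0t0d0", "c0t2d0", nvroot,
 *	    B_TRUE) != 0)
 *		return (-1);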
3041 */ 3042 int 3043 zpool_vdev_attach(zpool_handle_t *zhp, 3044 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 3045 { 3046 zfs_cmd_t zc = { 0 }; 3047 char msg[1024]; 3048 int ret; 3049 nvlist_t *tgt; 3050 boolean_t avail_spare, l2cache, islog; 3051 uint64_t val; 3052 char *newname; 3053 nvlist_t **child; 3054 uint_t children; 3055 nvlist_t *config_root; 3056 libzfs_handle_t *hdl = zhp->zpool_hdl; 3057 boolean_t rootpool = zpool_is_bootable(zhp); 3058 3059 if (replacing) 3060 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 3061 "cannot replace %s with %s"), old_disk, new_disk); 3062 else 3063 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 3064 "cannot attach %s to %s"), new_disk, old_disk); 3065 3066 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3067 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 3068 &islog)) == NULL) 3069 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3070 3071 if (avail_spare) 3072 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3073 3074 if (l2cache) 3075 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 3076 3077 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3078 zc.zc_cookie = replacing; 3079 3080 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3081 &child, &children) != 0 || children != 1) { 3082 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3083 "new device must be a single disk")); 3084 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 3085 } 3086 3087 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 3088 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 3089 3090 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL) 3091 return (-1); 3092 3093 /* 3094 * If the target is a hot spare that has been swapped in, we can only 3095 * replace it with another hot spare. 3096 */ 3097 if (replacing && 3098 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 3099 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 3100 NULL) == NULL || !avail_spare) && 3101 is_replacing_spare(config_root, tgt, 1)) { 3102 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3103 "can only be replaced by another hot spare")); 3104 free(newname); 3105 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 3106 } 3107 3108 free(newname); 3109 3110 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 3111 return (-1); 3112 3113 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 3114 3115 zcmd_free_nvlists(&zc); 3116 3117 if (ret == 0) { 3118 if (rootpool) { 3119 /* 3120 * XXX need a better way to prevent user from 3121 * booting up a half-baked vdev. 3122 */ 3123 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 3124 "sure to wait until resilver is done " 3125 "before rebooting.\n")); 3126 } 3127 return (0); 3128 } 3129 3130 switch (errno) { 3131 case ENOTSUP: 3132 /* 3133 * Can't attach to or replace this type of vdev. 
3134 */ 3135 if (replacing) { 3136 uint64_t version = zpool_get_prop_int(zhp, 3137 ZPOOL_PROP_VERSION, NULL); 3138 3139 if (islog) 3140 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3141 "cannot replace a log with a spare")); 3142 else if (version >= SPA_VERSION_MULTI_REPLACE) 3143 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3144 "already in replacing/spare config; wait " 3145 "for completion or use 'zpool detach'")); 3146 else 3147 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3148 "cannot replace a replacing device")); 3149 } else { 3150 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3151 "can only attach to mirrors and top-level " 3152 "disks")); 3153 } 3154 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 3155 break; 3156 3157 case EINVAL: 3158 /* 3159 * The new device must be a single disk. 3160 */ 3161 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3162 "new device must be a single disk")); 3163 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 3164 break; 3165 3166 case EBUSY: 3167 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, " 3168 "or device removal is in progress"), 3169 new_disk); 3170 (void) zfs_error(hdl, EZFS_BADDEV, msg); 3171 break; 3172 3173 case EOVERFLOW: 3174 /* 3175 * The new device is too small. 3176 */ 3177 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3178 "device is too small")); 3179 (void) zfs_error(hdl, EZFS_BADDEV, msg); 3180 break; 3181 3182 case EDOM: 3183 /* 3184 * The new device has a different optimal sector size. 3185 */ 3186 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3187 "new device has a different optimal sector size; use the " 3188 "option '-o ashift=N' to override the optimal size")); 3189 (void) zfs_error(hdl, EZFS_BADDEV, msg); 3190 break; 3191 3192 case ENAMETOOLONG: 3193 /* 3194 * The resulting top-level vdev spec won't fit in the label. 3195 */ 3196 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 3197 break; 3198 3199 default: 3200 (void) zpool_standard_error(hdl, errno, msg); 3201 } 3202 3203 return (-1); 3204 } 3205 3206 /* 3207 * Detach the specified device. 3208 */ 3209 int 3210 zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 3211 { 3212 zfs_cmd_t zc = { 0 }; 3213 char msg[1024]; 3214 nvlist_t *tgt; 3215 boolean_t avail_spare, l2cache; 3216 libzfs_handle_t *hdl = zhp->zpool_hdl; 3217 3218 (void) snprintf(msg, sizeof (msg), 3219 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 3220 3221 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3222 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3223 NULL)) == NULL) 3224 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3225 3226 if (avail_spare) 3227 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3228 3229 if (l2cache) 3230 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 3231 3232 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3233 3234 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 3235 return (0); 3236 3237 switch (errno) { 3238 3239 case ENOTSUP: 3240 /* 3241 * Can't detach from this type of vdev. 3242 */ 3243 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 3244 "applicable to mirror and replacing vdevs")); 3245 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 3246 break; 3247 3248 case EBUSY: 3249 /* 3250 * There are no other replicas of this device. 3251 */ 3252 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 3253 break; 3254 3255 default: 3256 (void) zpool_standard_error(hdl, errno, msg); 3257 } 3258 3259 return (-1); 3260 } 3261 3262 /* 3263 * Find a mirror vdev in the source nvlist. 
3264 * 3265 * The mchild array contains a list of disks in one of the top-level mirrors 3266 * of the source pool. The schild array contains a list of disks that the 3267 * user specified on the command line. We loop over the mchild array to 3268 * see if any entry in the schild array matches. 3269 * 3270 * If a disk in the mchild array is found in the schild array, we return 3271 * the index of that entry. Otherwise we return -1. 3272 */ 3273 static int 3274 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 3275 nvlist_t **schild, uint_t schildren) 3276 { 3277 uint_t mc; 3278 3279 for (mc = 0; mc < mchildren; mc++) { 3280 uint_t sc; 3281 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 3282 mchild[mc], 0); 3283 3284 for (sc = 0; sc < schildren; sc++) { 3285 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 3286 schild[sc], 0); 3287 boolean_t result = (strcmp(mpath, spath) == 0); 3288 3289 free(spath); 3290 if (result) { 3291 free(mpath); 3292 return (mc); 3293 } 3294 } 3295 3296 free(mpath); 3297 } 3298 3299 return (-1); 3300 } 3301 3302 /* 3303 * Split a mirror pool. If newroot points to null, then a new nvlist 3304 * is generated and it is the responsibility of the caller to free it. 3305 */ 3306 int 3307 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 3308 nvlist_t *props, splitflags_t flags) 3309 { 3310 zfs_cmd_t zc = { 0 }; 3311 char msg[1024]; 3312 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 3313 nvlist_t **varray = NULL, *zc_props = NULL; 3314 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 3315 libzfs_handle_t *hdl = zhp->zpool_hdl; 3316 uint64_t vers; 3317 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 3318 int retval = 0; 3319 3320 (void) snprintf(msg, sizeof (msg), 3321 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 3322 3323 if (!zpool_name_valid(hdl, B_FALSE, newname)) 3324 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 3325 3326 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 3327 (void) fprintf(stderr, gettext("Internal error: unable to " 3328 "retrieve pool configuration\n")); 3329 return (-1); 3330 } 3331 3332 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 3333 == 0); 3334 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 3335 3336 if (props) { 3337 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 3338 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 3339 props, vers, flags, msg)) == NULL) 3340 return (-1); 3341 } 3342 3343 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 3344 &children) != 0) { 3345 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3346 "Source pool is missing vdev tree")); 3347 nvlist_free(zc_props); 3348 return (-1); 3349 } 3350 3351 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 3352 vcount = 0; 3353 3354 if (*newroot == NULL || 3355 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 3356 &newchild, &newchildren) != 0) 3357 newchildren = 0; 3358 3359 for (c = 0; c < children; c++) { 3360 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 3361 char *type; 3362 nvlist_t **mchild, *vdev; 3363 uint_t mchildren; 3364 int entry; 3365 3366 /* 3367 * Unlike cache & spares, slogs are stored in the 3368 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 
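 * A hole vdev is substituted for each entry that is skipped so the
 * top-level vdev indices in the new config stay aligned with the
 * source pool.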
3369 */ 3370 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 3371 &is_log); 3372 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 3373 &is_hole); 3374 if (is_log || is_hole) { 3375 /* 3376 * Create a hole vdev and put it in the config. 3377 */ 3378 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 3379 goto out; 3380 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 3381 VDEV_TYPE_HOLE) != 0) 3382 goto out; 3383 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 3384 1) != 0) 3385 goto out; 3386 if (lastlog == 0) 3387 lastlog = vcount; 3388 varray[vcount++] = vdev; 3389 continue; 3390 } 3391 lastlog = 0; 3392 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 3393 == 0); 3394 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 3395 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3396 "Source pool must be composed only of mirrors\n")); 3397 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3398 goto out; 3399 } 3400 3401 verify(nvlist_lookup_nvlist_array(child[c], 3402 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 3403 3404 /* find or add an entry for this top-level vdev */ 3405 if (newchildren > 0 && 3406 (entry = find_vdev_entry(zhp, mchild, mchildren, 3407 newchild, newchildren)) >= 0) { 3408 /* We found a disk that the user specified. */ 3409 vdev = mchild[entry]; 3410 ++found; 3411 } else { 3412 /* User didn't specify a disk for this vdev. */ 3413 vdev = mchild[mchildren - 1]; 3414 } 3415 3416 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 3417 goto out; 3418 } 3419 3420 /* did we find every disk the user specified? */ 3421 if (found != newchildren) { 3422 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 3423 "include at most one disk from each mirror")); 3424 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3425 goto out; 3426 } 3427 3428 /* Prepare the nvlist for populating. */ 3429 if (*newroot == NULL) { 3430 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 3431 goto out; 3432 freelist = B_TRUE; 3433 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 3434 VDEV_TYPE_ROOT) != 0) 3435 goto out; 3436 } else { 3437 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 3438 } 3439 3440 /* Add all the children we found */ 3441 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 3442 lastlog == 0 ? vcount : lastlog) != 0) 3443 goto out; 3444 3445 /* 3446 * If we're just doing a dry run, exit now with success. 3447 */ 3448 if (flags.dryrun) { 3449 memory_err = B_FALSE; 3450 freelist = B_FALSE; 3451 goto out; 3452 } 3453 3454 /* now build up the config list & call the ioctl */ 3455 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3456 goto out; 3457 3458 if (nvlist_add_nvlist(newconfig, 3459 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3460 nvlist_add_string(newconfig, 3461 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3462 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3463 goto out; 3464 3465 /* 3466 * The new pool is automatically part of the namespace unless we 3467 * explicitly export it. 
3468 */ 3469 if (!flags.import) 3470 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3471 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3472 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3473 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3474 goto out; 3475 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3476 goto out; 3477 3478 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3479 retval = zpool_standard_error(hdl, errno, msg); 3480 goto out; 3481 } 3482 3483 freelist = B_FALSE; 3484 memory_err = B_FALSE; 3485 3486 out: 3487 if (varray != NULL) { 3488 int v; 3489 3490 for (v = 0; v < vcount; v++) 3491 nvlist_free(varray[v]); 3492 free(varray); 3493 } 3494 zcmd_free_nvlists(&zc); 3495 nvlist_free(zc_props); 3496 nvlist_free(newconfig); 3497 if (freelist) { 3498 nvlist_free(*newroot); 3499 *newroot = NULL; 3500 } 3501 3502 if (retval != 0) 3503 return (retval); 3504 3505 if (memory_err) 3506 return (no_memory(hdl)); 3507 3508 return (0); 3509 } 3510 3511 /* 3512 * Remove the given device. 3513 */ 3514 int 3515 zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3516 { 3517 zfs_cmd_t zc = { 0 }; 3518 char msg[1024]; 3519 nvlist_t *tgt; 3520 boolean_t avail_spare, l2cache, islog; 3521 libzfs_handle_t *hdl = zhp->zpool_hdl; 3522 uint64_t version; 3523 3524 (void) snprintf(msg, sizeof (msg), 3525 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3526 3527 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3528 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3529 &islog)) == NULL) 3530 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3531 3532 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3533 if (islog && version < SPA_VERSION_HOLES) { 3534 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3535 "pool must be upgraded to support log removal")); 3536 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3537 } 3538 3539 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 3540 3541 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3542 return (0); 3543 3544 switch (errno) { 3545 3546 case EINVAL: 3547 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3548 "invalid config; all top-level vdevs must " 3549 "have the same sector size and not be raidz.")); 3550 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 3551 break; 3552 3553 case EBUSY: 3554 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3555 "Pool busy; removal may already be in progress")); 3556 (void) zfs_error(hdl, EZFS_BUSY, msg); 3557 break; 3558 3559 default: 3560 (void) zpool_standard_error(hdl, errno, msg); 3561 } 3562 return (-1); 3563 } 3564 3565 int 3566 zpool_vdev_remove_cancel(zpool_handle_t *zhp) 3567 { 3568 zfs_cmd_t zc = { 0 }; 3569 char msg[1024]; 3570 libzfs_handle_t *hdl = zhp->zpool_hdl; 3571 3572 (void) snprintf(msg, sizeof (msg), 3573 dgettext(TEXT_DOMAIN, "cannot cancel removal")); 3574 3575 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3576 zc.zc_cookie = 1; 3577 3578 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3579 return (0); 3580 3581 return (zpool_standard_error(hdl, errno, msg)); 3582 } 3583 3584 int 3585 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path, 3586 uint64_t *sizep) 3587 { 3588 char msg[1024]; 3589 nvlist_t *tgt; 3590 boolean_t avail_spare, l2cache, islog; 3591 libzfs_handle_t *hdl = zhp->zpool_hdl; 3592 3593 (void) snprintf(msg, sizeof (msg), 3594 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"), 3595 path); 3596 3597 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 
&l2cache, 3598 &islog)) == NULL) 3599 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3600 3601 if (avail_spare || l2cache || islog) { 3602 *sizep = 0; 3603 return (0); 3604 } 3605 3606 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) { 3607 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3608 "indirect size not available")); 3609 return (zfs_error(hdl, EINVAL, msg)); 3610 } 3611 return (0); 3612 } 3613 3614 /* 3615 * Clear the errors for the pool, or the particular device if specified. 3616 */ 3617 int 3618 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3619 { 3620 zfs_cmd_t zc = { 0 }; 3621 char msg[1024]; 3622 nvlist_t *tgt; 3623 zpool_load_policy_t policy; 3624 boolean_t avail_spare, l2cache; 3625 libzfs_handle_t *hdl = zhp->zpool_hdl; 3626 nvlist_t *nvi = NULL; 3627 int error; 3628 3629 if (path) 3630 (void) snprintf(msg, sizeof (msg), 3631 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3632 path); 3633 else 3634 (void) snprintf(msg, sizeof (msg), 3635 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3636 zhp->zpool_name); 3637 3638 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3639 if (path) { 3640 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3641 &l2cache, NULL)) == NULL) 3642 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3643 3644 /* 3645 * Don't allow error clearing for hot spares. Do allow 3646 * error clearing for l2cache devices. 3647 */ 3648 if (avail_spare) 3649 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3650 3651 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3652 &zc.zc_guid) == 0); 3653 } 3654 3655 zpool_get_load_policy(rewindnvl, &policy); 3656 zc.zc_cookie = policy.zlp_rewind; 3657 3658 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3659 return (-1); 3660 3661 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 3662 return (-1); 3663 3664 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3665 errno == ENOMEM) { 3666 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3667 zcmd_free_nvlists(&zc); 3668 return (-1); 3669 } 3670 } 3671 3672 if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) && 3673 errno != EPERM && errno != EACCES)) { 3674 if (policy.zlp_rewind & 3675 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3676 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3677 zpool_rewind_exclaim(hdl, zc.zc_name, 3678 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), 3679 nvi); 3680 nvlist_free(nvi); 3681 } 3682 zcmd_free_nvlists(&zc); 3683 return (0); 3684 } 3685 3686 zcmd_free_nvlists(&zc); 3687 return (zpool_standard_error(hdl, errno, msg)); 3688 } 3689 3690 /* 3691 * Similar to zpool_clear(), but takes a GUID (used by fmd). 3692 */ 3693 int 3694 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3695 { 3696 zfs_cmd_t zc = { 0 }; 3697 char msg[1024]; 3698 libzfs_handle_t *hdl = zhp->zpool_hdl; 3699 3700 (void) snprintf(msg, sizeof (msg), 3701 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3702 guid); 3703 3704 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3705 zc.zc_guid = guid; 3706 zc.zc_cookie = ZPOOL_NO_REWIND; 3707 3708 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3709 return (0); 3710 3711 return (zpool_standard_error(hdl, errno, msg)); 3712 } 3713 3714 /* 3715 * Change the GUID for a pool. 
3716 */ 3717 int 3718 zpool_reguid(zpool_handle_t *zhp) 3719 { 3720 char msg[1024]; 3721 libzfs_handle_t *hdl = zhp->zpool_hdl; 3722 zfs_cmd_t zc = { 0 }; 3723 3724 (void) snprintf(msg, sizeof (msg), 3725 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3726 3727 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3728 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3729 return (0); 3730 3731 return (zpool_standard_error(hdl, errno, msg)); 3732 } 3733 3734 /* 3735 * Reopen the pool. 3736 */ 3737 int 3738 zpool_reopen(zpool_handle_t *zhp) 3739 { 3740 zfs_cmd_t zc = { 0 }; 3741 char msg[1024]; 3742 libzfs_handle_t *hdl = zhp->zpool_hdl; 3743 3744 (void) snprintf(msg, sizeof (msg), 3745 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3746 zhp->zpool_name); 3747 3748 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3749 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3750 return (0); 3751 return (zpool_standard_error(hdl, errno, msg)); 3752 } 3753 3754 /* call into libzfs_core to execute the sync IOCTL per pool */ 3755 int 3756 zpool_sync_one(zpool_handle_t *zhp, void *data) 3757 { 3758 int ret; 3759 libzfs_handle_t *hdl = zpool_get_handle(zhp); 3760 const char *pool_name = zpool_get_name(zhp); 3761 boolean_t *force = data; 3762 nvlist_t *innvl = fnvlist_alloc(); 3763 3764 fnvlist_add_boolean_value(innvl, "force", *force); 3765 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) { 3766 nvlist_free(innvl); 3767 return (zpool_standard_error_fmt(hdl, ret, 3768 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name)); 3769 } 3770 nvlist_free(innvl); 3771 3772 return (0); 3773 } 3774 3775 /* 3776 * Convert from a devid string to a path. 3777 */ 3778 static char * 3779 devid_to_path(char *devid_str) 3780 { 3781 ddi_devid_t devid; 3782 char *minor; 3783 char *path; 3784 devid_nmlist_t *list = NULL; 3785 int ret; 3786 3787 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3788 return (NULL); 3789 3790 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3791 3792 devid_str_free(minor); 3793 devid_free(devid); 3794 3795 if (ret != 0) 3796 return (NULL); 3797 3798 /* 3799 * In a case the strdup() fails, we will just return NULL below. 3800 */ 3801 path = strdup(list[0].devname); 3802 3803 devid_free_nmlist(list); 3804 3805 return (path); 3806 } 3807 3808 /* 3809 * Convert from a path to a devid string. 3810 */ 3811 static char * 3812 path_to_devid(const char *path) 3813 { 3814 int fd; 3815 ddi_devid_t devid; 3816 char *minor, *ret; 3817 3818 if ((fd = open(path, O_RDONLY)) < 0) 3819 return (NULL); 3820 3821 minor = NULL; 3822 ret = NULL; 3823 if (devid_get(fd, &devid) == 0) { 3824 if (devid_get_minor_name(fd, &minor) == 0) 3825 ret = devid_str_encode(devid, minor); 3826 if (minor != NULL) 3827 devid_str_free(minor); 3828 devid_free(devid); 3829 } 3830 (void) close(fd); 3831 3832 return (ret); 3833 } 3834 3835 struct path_from_physpath_walker_args { 3836 char *pfpwa_path; 3837 }; 3838 3839 /* 3840 * Walker for use with di_devlink_walk(). Stores the "/dev" path of the first 3841 * primary devlink (i.e., the first devlink which refers to our "/devices" 3842 * node) and stops walking. 
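 *
 * For example (hypothetical values), walking the links for the physical
 * path "/pci@0,0/pci1234,5678@1f,2/disk@0,0:a" might store
 * "/dev/dsk/c2t0d0s0" and terminate the walk.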
3843 */ 3844 static int 3845 path_from_physpath_walker(di_devlink_t devlink, void *arg) 3846 { 3847 struct path_from_physpath_walker_args *pfpwa = arg; 3848 3849 if (di_devlink_type(devlink) != DI_PRIMARY_LINK) { 3850 return (DI_WALK_CONTINUE); 3851 } 3852 3853 verify(pfpwa->pfpwa_path == NULL); 3854 if ((pfpwa->pfpwa_path = strdup(di_devlink_path(devlink))) != NULL) { 3855 return (DI_WALK_TERMINATE); 3856 } 3857 3858 return (DI_WALK_CONTINUE); 3859 } 3860 3861 /* 3862 * Search for a "/dev" path that refers to our physical path. Returns the new 3863 * path if one is found and it does not match the existing "path" value. If 3864 * the value is unchanged, or one could not be found, returns NULL. 3865 */ 3866 static char * 3867 path_from_physpath(libzfs_handle_t *hdl, const char *path, 3868 const char *physpath) 3869 { 3870 struct path_from_physpath_walker_args pfpwa; 3871 3872 if (physpath == NULL) { 3873 return (NULL); 3874 } 3875 3876 if (hdl->libzfs_devlink == NULL) { 3877 if ((hdl->libzfs_devlink = di_devlink_init(NULL, 0)) == 3878 DI_LINK_NIL) { 3879 /* 3880 * We may not be able to open a handle if this process 3881 * is insufficiently privileged, or we are too early in 3882 * boot for devfsadm to be ready. Ignore this error 3883 * and defer the path check to a subsequent run. 3884 */ 3885 return (NULL); 3886 } 3887 } 3888 3889 pfpwa.pfpwa_path = NULL; 3890 (void) di_devlink_walk(hdl->libzfs_devlink, NULL, physpath, 3891 DI_PRIMARY_LINK, &pfpwa, path_from_physpath_walker); 3892 3893 if (path != NULL && pfpwa.pfpwa_path != NULL && 3894 strcmp(path, pfpwa.pfpwa_path) == 0) { 3895 /* 3896 * If the path is already correct, no change is required. 3897 */ 3898 free(pfpwa.pfpwa_path); 3899 return (NULL); 3900 } 3901 3902 return (pfpwa.pfpwa_path); 3903 } 3904 3905 /* 3906 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3907 * ignore any failure here, since a common case is for an unprivileged user to 3908 * type 'zpool status', and we'll display the correct information anyway. 3909 */ 3910 static void 3911 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3912 { 3913 zfs_cmd_t zc = { 0 }; 3914 3915 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3916 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3917 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3918 &zc.zc_guid) == 0); 3919 3920 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3921 } 3922 3923 /* 3924 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3925 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3926 * We also check if this is a whole disk, in which case we strip off the 3927 * trailing 's0' slice name. 3928 * 3929 * This routine is also responsible for identifying when disks have been 3930 * reconfigured in a new location. The kernel will have opened the device by 3931 * devid, but the path will still refer to the old location. To catch this, we 3932 * first do a path -> devid translation (which is fast for the common case). If 3933 * the devid matches, we're done. If not, we do a reverse devid -> path 3934 * translation and issue the appropriate ioctl() to update the path of the vdev. 3935 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3936 * of these checks. 
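 *
 * Typical results (illustrative): a whole-disk vdev whose path is
 * "/dev/dsk/c0t0d0s0" is displayed as "c0t0d0"; with VDEV_NAME_GUID set
 * the decimal guid string is returned instead; and a raidz top-level
 * vdev named with VDEV_NAME_TYPE_ID comes back as something like
 * "raidz2-1".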
3937 */ 3938 char * 3939 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3940 int name_flags) 3941 { 3942 char *path, *env; 3943 uint64_t value; 3944 char buf[64]; 3945 3946 env = getenv("ZPOOL_VDEV_NAME_PATH"); 3947 if (env && (strtoul(env, NULL, 0) > 0 || 3948 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) 3949 name_flags |= VDEV_NAME_PATH; 3950 3951 env = getenv("ZPOOL_VDEV_NAME_GUID"); 3952 if (env && (strtoul(env, NULL, 0) > 0 || 3953 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) 3954 name_flags |= VDEV_NAME_GUID; 3955 3956 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS"); 3957 if (env && (strtoul(env, NULL, 0) > 0 || 3958 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) 3959 name_flags |= VDEV_NAME_FOLLOW_LINKS; 3960 3961 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 || 3962 name_flags & VDEV_NAME_GUID) { 3963 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value); 3964 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value); 3965 path = buf; 3966 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 3967 vdev_stat_t *vs; 3968 uint_t vsc; 3969 char *newpath = NULL; 3970 char *physpath = NULL; 3971 char *devid = NULL; 3972 3973 /* 3974 * If the device is dead (faulted, offline, etc) then don't 3975 * bother opening it. Otherwise we may be forcing the user to 3976 * open a misbehaving device, which can have undesirable 3977 * effects. 3978 */ 3979 if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3980 (uint64_t **)&vs, &vsc) != 0 || 3981 vs->vs_state < VDEV_STATE_DEGRADED || 3982 zhp == NULL) { 3983 goto after_open; 3984 } 3985 3986 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3987 /* 3988 * This vdev has a devid. We can use it to check the 3989 * current path. 3990 */ 3991 char *newdevid = path_to_devid(path); 3992 3993 if (newdevid == NULL || strcmp(devid, newdevid) != 0) { 3994 newpath = devid_to_path(devid); 3995 } 3996 3997 if (newdevid != NULL) 3998 devid_str_free(newdevid); 3999 4000 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, 4001 &physpath) == 0) { 4002 /* 4003 * This vdev does not have a devid, but it does have a 4004 * physical path. Attempt to translate this to a /dev 4005 * path. 4006 */ 4007 newpath = path_from_physpath(hdl, path, physpath); 4008 } 4009 4010 if (newpath != NULL) { 4011 /* 4012 * Update the path appropriately. 4013 */ 4014 set_path(zhp, nv, newpath); 4015 if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, 4016 newpath) == 0) { 4017 verify(nvlist_lookup_string(nv, 4018 ZPOOL_CONFIG_PATH, &path) == 0); 4019 } 4020 free(newpath); 4021 } 4022 4023 if (name_flags & VDEV_NAME_FOLLOW_LINKS) { 4024 char *rp = realpath(path, NULL); 4025 if (rp) { 4026 strlcpy(buf, rp, sizeof (buf)); 4027 path = buf; 4028 free(rp); 4029 } 4030 } 4031 4032 after_open: 4033 if (strncmp(path, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0) 4034 path += strlen(ZFS_DISK_ROOTD); 4035 4036 /* 4037 * Remove the partition from the path it this is a whole disk. 4038 */ 4039 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value) 4040 == 0 && value && !(name_flags & VDEV_NAME_PATH)) { 4041 int pathlen = strlen(path); 4042 char *tmp = zfs_strdup(hdl, path); 4043 4044 /* 4045 * If it starts with c#, and ends with "s0" or "s1", 4046 * chop the slice off, or if it ends with "s0/old" or 4047 * "s1/old", remove the slice from the middle. 
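 * For example, "c0t0d0s0" becomes "c0t0d0", and "c0t0d0s0/old"
 * becomes "c0t0d0/old".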
4048 */ 4049 if (CTD_CHECK(tmp)) { 4050 if (strcmp(&tmp[pathlen - 2], "s0") == 0 || 4051 strcmp(&tmp[pathlen - 2], "s1") == 0) { 4052 tmp[pathlen - 2] = '\0'; 4053 } else if (pathlen > 6 && 4054 (strcmp(&tmp[pathlen - 6], "s0/old") == 0 || 4055 strcmp(&tmp[pathlen - 6], "s1/old") == 0)) { 4056 (void) strcpy(&tmp[pathlen - 6], 4057 "/old"); 4058 } 4059 } 4060 return (tmp); 4061 } 4062 } else { 4063 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 4064 4065 /* 4066 * If it's a raidz device, we need to stick in the parity level. 4067 */ 4068 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 4069 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 4070 &value) == 0); 4071 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 4072 (u_longlong_t)value); 4073 path = buf; 4074 } 4075 4076 /* 4077 * We identify each top-level vdev by using a <type-id> 4078 * naming convention. 4079 */ 4080 if (name_flags & VDEV_NAME_TYPE_ID) { 4081 uint64_t id; 4082 4083 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 4084 &id) == 0); 4085 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 4086 (u_longlong_t)id); 4087 path = buf; 4088 } 4089 } 4090 4091 return (zfs_strdup(hdl, path)); 4092 } 4093 4094 static int 4095 zbookmark_mem_compare(const void *a, const void *b) 4096 { 4097 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 4098 } 4099 4100 /* 4101 * Retrieve the persistent error log, uniquify the members, and return to the 4102 * caller. 4103 */ 4104 int 4105 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 4106 { 4107 zfs_cmd_t zc = { 0 }; 4108 uint64_t count; 4109 zbookmark_phys_t *zb = NULL; 4110 int i; 4111 4112 /* 4113 * Retrieve the raw error list from the kernel. If the number of errors 4114 * has increased, allocate more space and continue until we get the 4115 * entire list. 4116 */ 4117 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 4118 &count) == 0); 4119 if (count == 0) 4120 return (0); 4121 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 4122 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL) 4123 return (-1); 4124 zc.zc_nvlist_dst_size = count; 4125 (void) strcpy(zc.zc_name, zhp->zpool_name); 4126 for (;;) { 4127 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 4128 &zc) != 0) { 4129 free((void *)(uintptr_t)zc.zc_nvlist_dst); 4130 if (errno == ENOMEM) { 4131 void *dst; 4132 4133 count = zc.zc_nvlist_dst_size; 4134 dst = zfs_alloc(zhp->zpool_hdl, count * 4135 sizeof (zbookmark_phys_t)); 4136 if (dst == NULL) 4137 return (-1); 4138 zc.zc_nvlist_dst = (uintptr_t)dst; 4139 } else { 4140 return (-1); 4141 } 4142 } else { 4143 break; 4144 } 4145 } 4146 4147 /* 4148 * Sort the resulting bookmarks. This is a little confusing due to the 4149 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last 4150 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks 4151 * _not_ copied as part of the process. So we point the start of our 4152 * array appropriately and decrement the total number of elements. 4153 */ 4154 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) + 4155 zc.zc_nvlist_dst_size; 4156 count -= zc.zc_nvlist_dst_size; 4157 4158 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare); 4159 4160 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 4161 4162 /* 4163 * Fill in the nverrlistp with nvlists of dataset and object numbers.
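 * The list is allocated above without NV_UNIQUE_NAME, so every entry can
 * share the same arbitrary key; callers that walk it only look at the
 * values.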
4164 */ 4165 for (i = 0; i < count; i++) { 4166 nvlist_t *nv; 4167 4168 /* ignoring zb_blkid and zb_level for now */ 4169 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 4170 zb[i-1].zb_object == zb[i].zb_object) 4171 continue; 4172 4173 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 4174 goto nomem; 4175 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 4176 zb[i].zb_objset) != 0) { 4177 nvlist_free(nv); 4178 goto nomem; 4179 } 4180 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 4181 zb[i].zb_object) != 0) { 4182 nvlist_free(nv); 4183 goto nomem; 4184 } 4185 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 4186 nvlist_free(nv); 4187 goto nomem; 4188 } 4189 nvlist_free(nv); 4190 } 4191 4192 free((void *)(uintptr_t)zc.zc_nvlist_dst); 4193 return (0); 4194 4195 nomem: 4196 free((void *)(uintptr_t)zc.zc_nvlist_dst); 4197 return (no_memory(zhp->zpool_hdl)); 4198 } 4199 4200 /* 4201 * Upgrade a ZFS pool to the latest on-disk version. 4202 */ 4203 int 4204 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 4205 { 4206 zfs_cmd_t zc = { 0 }; 4207 libzfs_handle_t *hdl = zhp->zpool_hdl; 4208 4209 (void) strcpy(zc.zc_name, zhp->zpool_name); 4210 zc.zc_cookie = new_version; 4211 4212 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 4213 return (zpool_standard_error_fmt(hdl, errno, 4214 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 4215 zhp->zpool_name)); 4216 return (0); 4217 } 4218 4219 void 4220 zfs_save_arguments(int argc, char **argv, char *string, int len) 4221 { 4222 (void) strlcpy(string, basename(argv[0]), len); 4223 for (int i = 1; i < argc; i++) { 4224 (void) strlcat(string, " ", len); 4225 (void) strlcat(string, argv[i], len); 4226 } 4227 } 4228 4229 int 4230 zpool_log_history(libzfs_handle_t *hdl, const char *message) 4231 { 4232 zfs_cmd_t zc = { 0 }; 4233 nvlist_t *args; 4234 int err; 4235 4236 args = fnvlist_alloc(); 4237 fnvlist_add_string(args, "message", message); 4238 err = zcmd_write_src_nvlist(hdl, &zc, args); 4239 if (err == 0) 4240 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc); 4241 nvlist_free(args); 4242 zcmd_free_nvlists(&zc); 4243 return (err); 4244 } 4245 4246 /* 4247 * Perform ioctl to get some command history of a pool. 4248 * 4249 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 4250 * logical offset of the history buffer to start reading from. 4251 * 4252 * Upon return, 'off' is the next logical offset to read from and 4253 * 'len' is the actual amount of bytes read into 'buf'. 
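 *
 * A minimal, hypothetical read loop built on this contract (see
 * zpool_get_history() below for the real consumer):
 *
 *	uint64_t off = 0, len;
 *	char hbuf[128 * 1024];
 *	for (;;) {
 *		len = sizeof (hbuf);
 *		if (get_history(zhp, hbuf, &off, &len) != 0 || len == 0)
 *			break;
 *		(process 'len' bytes of packed records from 'hbuf')
 *	}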
4254 */ 4255 static int 4256 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 4257 { 4258 zfs_cmd_t zc = { 0 }; 4259 libzfs_handle_t *hdl = zhp->zpool_hdl; 4260 4261 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 4262 4263 zc.zc_history = (uint64_t)(uintptr_t)buf; 4264 zc.zc_history_len = *len; 4265 zc.zc_history_offset = *off; 4266 4267 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 4268 switch (errno) { 4269 case EPERM: 4270 return (zfs_error_fmt(hdl, EZFS_PERM, 4271 dgettext(TEXT_DOMAIN, 4272 "cannot show history for pool '%s'"), 4273 zhp->zpool_name)); 4274 case ENOENT: 4275 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 4276 dgettext(TEXT_DOMAIN, "cannot get history for pool " 4277 "'%s'"), zhp->zpool_name)); 4278 case ENOTSUP: 4279 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 4280 dgettext(TEXT_DOMAIN, "cannot get history for pool " 4281 "'%s', pool must be upgraded"), zhp->zpool_name)); 4282 default: 4283 return (zpool_standard_error_fmt(hdl, errno, 4284 dgettext(TEXT_DOMAIN, 4285 "cannot get history for '%s'"), zhp->zpool_name)); 4286 } 4287 } 4288 4289 *len = zc.zc_history_len; 4290 *off = zc.zc_history_offset; 4291 4292 return (0); 4293 } 4294 4295 /* 4296 * Process the buffer of nvlists, unpacking and storing each nvlist record 4297 * into 'records'. 'leftover' is set to the number of bytes that weren't 4298 * processed as there wasn't a complete record. 4299 */ 4300 int 4301 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 4302 nvlist_t ***records, uint_t *numrecords) 4303 { 4304 uint64_t reclen; 4305 nvlist_t *nv; 4306 int i; 4307 4308 while (bytes_read > sizeof (reclen)) { 4309 4310 /* get length of packed record (stored as little endian) */ 4311 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 4312 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 4313 4314 if (bytes_read < sizeof (reclen) + reclen) 4315 break; 4316 4317 /* unpack record */ 4318 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 4319 return (ENOMEM); 4320 bytes_read -= sizeof (reclen) + reclen; 4321 buf += sizeof (reclen) + reclen; 4322 4323 /* add record to nvlist array */ 4324 (*numrecords)++; 4325 if (ISP2(*numrecords + 1)) { 4326 *records = realloc(*records, 4327 *numrecords * 2 * sizeof (nvlist_t *)); 4328 } 4329 (*records)[*numrecords - 1] = nv; 4330 } 4331 4332 *leftover = bytes_read; 4333 return (0); 4334 } 4335 4336 /* 4337 * Retrieve the command history of a pool. 4338 */ 4339 int 4340 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 4341 { 4342 char *buf; 4343 int buflen = 128 * 1024; 4344 uint64_t off = 0; 4345 nvlist_t **records = NULL; 4346 uint_t numrecords = 0; 4347 int err, i; 4348 4349 buf = malloc(buflen); 4350 if (buf == NULL) 4351 return (ENOMEM); 4352 do { 4353 uint64_t bytes_read = buflen; 4354 uint64_t leftover; 4355 4356 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 4357 break; 4358 4359 /* if nothing else was read in, we're at EOF, just return */ 4360 if (!bytes_read) 4361 break; 4362 4363 if ((err = zpool_history_unpack(buf, bytes_read, 4364 &leftover, &records, &numrecords)) != 0) 4365 break; 4366 off -= leftover; 4367 if (leftover == bytes_read) { 4368 /* 4369 * no progress made, because buffer is not big enough 4370 * to hold this record; resize and retry. 
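 *
 * (Each record in the stream is an 8-byte little-endian length
 * followed by that many bytes of packed nvlist, so a single record
 * larger than 'buflen' leaves leftover == bytes_read until the
 * buffer grows to hold it.)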
4371 */
4372 buflen *= 2;
4373 free(buf);
4374 buf = malloc(buflen);
4375 if (buf == NULL)
4376 return (ENOMEM);
4377 }
4378
4379 /* CONSTCOND */
4380 } while (1);
4381
4382 free(buf);
4383
4384 if (!err) {
4385 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
4386 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
4387 records, numrecords) == 0);
4388 }
4389 for (i = 0; i < numrecords; i++)
4390 nvlist_free(records[i]);
4391 free(records);
4392
4393 return (err);
4394 }
4395
4396 void
4397 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4398 char *pathname, size_t len)
4399 {
4400 zfs_cmd_t zc = { 0 };
4401 boolean_t mounted = B_FALSE;
4402 char *mntpnt = NULL;
4403 char dsname[ZFS_MAX_DATASET_NAME_LEN];
4404
4405 if (dsobj == 0) {
4406 /* special case for the MOS */
4407 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
4408 return;
4409 }
4410
4411 /* get the dataset's name */
4412 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4413 zc.zc_obj = dsobj;
4414 if (ioctl(zhp->zpool_hdl->libzfs_fd,
4415 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4416 /* just write out a path of two object numbers */
4417 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
4418 dsobj, obj);
4419 return;
4420 }
4421 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4422
4423 /* find out if the dataset is mounted */
4424 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
4425
4426 /* get the corrupted object's path */
4427 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4428 zc.zc_obj = obj;
4429 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
4430 &zc) == 0) {
4431 if (mounted) {
4432 (void) snprintf(pathname, len, "%s%s", mntpnt,
4433 zc.zc_value);
4434 } else {
4435 (void) snprintf(pathname, len, "%s:%s",
4436 dsname, zc.zc_value);
4437 }
4438 } else {
4439 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
4440 }
4441 free(mntpnt);
4442 }
4443
4444 /*
4445 * Read the EFI label from the config; if a label does not exist then
4446 * pass back the error to the caller. If the caller has passed a non-NULL
4447 * diskaddr argument then we set it to the starting address of the EFI
4448 * partition. If the caller has passed a non-NULL boolean argument, then
4449 * we set it to indicate whether the disk has an EFI system partition.
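 *
 * A hypothetical caller sketch:
 *
 *	diskaddr_t start;
 *	boolean_t has_sys;
 *	if (read_efi_label(config, &start, &has_sys) >= 0) {
 *		(start is the V_USR slice's first block, when one exists;
 *		has_sys is B_TRUE if a V_SYSTEM partition was found)
 *	}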
4450 */ 4451 static int 4452 read_efi_label(nvlist_t *config, diskaddr_t *sb, boolean_t *system) 4453 { 4454 char *path; 4455 int fd; 4456 char diskname[MAXPATHLEN]; 4457 boolean_t boot = B_FALSE; 4458 int err = -1; 4459 int slice; 4460 4461 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 4462 return (err); 4463 4464 (void) snprintf(diskname, sizeof (diskname), "%s%s", ZFS_RDISK_ROOT, 4465 strrchr(path, '/')); 4466 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 4467 struct dk_gpt *vtoc; 4468 4469 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 4470 for (slice = 0; slice < vtoc->efi_nparts; slice++) { 4471 if (vtoc->efi_parts[slice].p_tag == V_SYSTEM) 4472 boot = B_TRUE; 4473 if (vtoc->efi_parts[slice].p_tag == V_USR) 4474 break; 4475 } 4476 if (sb != NULL && vtoc->efi_parts[slice].p_tag == V_USR) 4477 *sb = vtoc->efi_parts[slice].p_start; 4478 if (system != NULL) 4479 *system = boot; 4480 efi_free(vtoc); 4481 } 4482 (void) close(fd); 4483 } 4484 return (err); 4485 } 4486 4487 /* 4488 * determine where a partition starts on a disk in the current 4489 * configuration 4490 */ 4491 static diskaddr_t 4492 find_start_block(nvlist_t *config) 4493 { 4494 nvlist_t **child; 4495 uint_t c, children; 4496 diskaddr_t sb = MAXOFFSET_T; 4497 uint64_t wholedisk; 4498 4499 if (nvlist_lookup_nvlist_array(config, 4500 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 4501 if (nvlist_lookup_uint64(config, 4502 ZPOOL_CONFIG_WHOLE_DISK, 4503 &wholedisk) != 0 || !wholedisk) { 4504 return (MAXOFFSET_T); 4505 } 4506 if (read_efi_label(config, &sb, NULL) < 0) 4507 sb = MAXOFFSET_T; 4508 return (sb); 4509 } 4510 4511 for (c = 0; c < children; c++) { 4512 sb = find_start_block(child[c]); 4513 if (sb != MAXOFFSET_T) { 4514 return (sb); 4515 } 4516 } 4517 return (MAXOFFSET_T); 4518 } 4519 4520 /* 4521 * Label an individual disk. The name provided is the short name, 4522 * stripped of any leading /dev path. 4523 */ 4524 int 4525 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name, 4526 zpool_boot_label_t boot_type, uint64_t boot_size, int *slice) 4527 { 4528 char path[MAXPATHLEN]; 4529 struct dk_gpt *vtoc; 4530 int fd; 4531 size_t resv = EFI_MIN_RESV_SIZE; 4532 uint64_t slice_size; 4533 diskaddr_t start_block; 4534 char errbuf[1024]; 4535 4536 /* prepare an error message just in case */ 4537 (void) snprintf(errbuf, sizeof (errbuf), 4538 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 4539 4540 if (zhp) { 4541 nvlist_t *nvroot; 4542 4543 verify(nvlist_lookup_nvlist(zhp->zpool_config, 4544 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 4545 4546 if (zhp->zpool_start_block == 0) 4547 start_block = find_start_block(nvroot); 4548 else 4549 start_block = zhp->zpool_start_block; 4550 zhp->zpool_start_block = start_block; 4551 } else { 4552 /* new pool */ 4553 start_block = NEW_START_BLOCK; 4554 } 4555 4556 (void) snprintf(path, sizeof (path), "%s/%s%s", ZFS_RDISK_ROOT, name, 4557 BACKUP_SLICE); 4558 4559 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 4560 /* 4561 * This shouldn't happen. We've long since verified that this 4562 * is a valid device. 
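 *
 * (The path opened above names the whole-disk backup slice, built from
 * ZFS_RDISK_ROOT, the short disk name and BACKUP_SLICE -- e.g. a
 * hypothetical "/dev/rdsk/c0t0d0s2".)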
4563 */
4564 zfs_error_aux(hdl,
4565 dgettext(TEXT_DOMAIN, "unable to open device"));
4566 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4567 }
4568
4569 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4570 /*
4571 * The only way this can fail is if we run out of memory, or we
4572 * were unable to read the disk's capacity.
4573 */
4574 if (errno == ENOMEM)
4575 (void) no_memory(hdl);
4576
4577 (void) close(fd);
4578 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4579 "unable to read disk capacity"), name);
4580
4581 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4582 }
4583
4584 /*
4585 * Why we use V_USR: V_BACKUP confuses users, and is considered
4586 * disposable by some EFI utilities (since EFI doesn't have a backup
4587 * slice). V_UNASSIGNED is supposed to be used only for zero size
4588 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4589 * etc. were all pretty specific. V_USR is as close to reality as we
4590 * can get, in the absence of V_OTHER.
4591 */
4592 /* first fix the partition start block */
4593 if (start_block == MAXOFFSET_T)
4594 start_block = NEW_START_BLOCK;
4595
4596 /*
4597 * The EFI System partition uses slice 0.
4598 * ZFS is on slice 1 and slice 8 is reserved.
4599 * We assume a GPT partition table without a system
4600 * partition has the zfs p_start == NEW_START_BLOCK.
4601 * If start_block != NEW_START_BLOCK, it means we have a
4602 * system partition. The correct solution would be to query/cache the
4603 * vtoc from an existing vdev member.
4604 */
4605 if (boot_type == ZPOOL_CREATE_BOOT_LABEL) {
4606 if (boot_size % vtoc->efi_lbasize != 0) {
4607 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4608 "boot partition size must be a multiple of %d"),
4609 vtoc->efi_lbasize);
4610 (void) close(fd);
4611 efi_free(vtoc);
4612 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4613 }
4614 /*
4615 * System partition size checks.
4616 * Note that 1MB is a rather arbitrary value; since we
4617 * are creating a dedicated pool, it should be enough
4618 * to hold the FAT file system plus the EFI boot loader.
4619 * This may need to be adjusted if the boot loader grows.
4620 */
4621 if (boot_size < 1024 * 1024) {
4622 char buf[64];
4623 zfs_nicenum(boot_size, buf, sizeof (buf));
4624 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4625 "Specified size %s for the EFI System partition is too "
4626 "small; the minimum size is 1MB."), buf);
4627 (void) close(fd);
4628 efi_free(vtoc);
4629 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4630 }
4631 /* 33MB is tested with mkfs -F pcfs */
4632 if (hdl->libzfs_printerr &&
4633 ((vtoc->efi_lbasize == 512 &&
4634 boot_size < 33 * 1024 * 1024) ||
4635 (vtoc->efi_lbasize == 4096 &&
4636 boot_size < 256 * 1024 * 1024))) {
4637 char buf[64];
4638 zfs_nicenum(boot_size, buf, sizeof (buf));
4639 (void) fprintf(stderr, dgettext(TEXT_DOMAIN,
4640 "Warning: EFI System partition size %s is "
4641 "too small to create a FAT32 file\nsystem, which "
4642 "may result in an unbootable system.\n"), buf);
4643 }
4644 /* Adjust the zfs partition start by the size of the system partition. */
4645 start_block += boot_size / vtoc->efi_lbasize;
4646 }
4647
4648 if (start_block == NEW_START_BLOCK) {
4649 /*
4650 * Use default layout.
4651 * ZFS is on slice 0 and slice 8 is reserved.
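 *
 * Illustratively, with R = EFI_MIN_RESV_SIZE:
 *
 *	slice 0 (V_USR "zfs"):  blocks [start_block, efi_last_u_lba - R]
 *	slice 8 (V_RESERVED):   the final R usable blocks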
4652 */ 4653 slice_size = vtoc->efi_last_u_lba + 1; 4654 slice_size -= EFI_MIN_RESV_SIZE; 4655 slice_size -= start_block; 4656 if (slice != NULL) 4657 *slice = 0; 4658 4659 vtoc->efi_parts[0].p_start = start_block; 4660 vtoc->efi_parts[0].p_size = slice_size; 4661 4662 vtoc->efi_parts[0].p_tag = V_USR; 4663 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 4664 4665 vtoc->efi_parts[8].p_start = slice_size + start_block; 4666 vtoc->efi_parts[8].p_size = resv; 4667 vtoc->efi_parts[8].p_tag = V_RESERVED; 4668 } else { 4669 slice_size = start_block - NEW_START_BLOCK; 4670 vtoc->efi_parts[0].p_start = NEW_START_BLOCK; 4671 vtoc->efi_parts[0].p_size = slice_size; 4672 vtoc->efi_parts[0].p_tag = V_SYSTEM; 4673 (void) strcpy(vtoc->efi_parts[0].p_name, "loader"); 4674 if (slice != NULL) 4675 *slice = 1; 4676 /* prepare slice 1 */ 4677 slice_size = vtoc->efi_last_u_lba + 1 - slice_size; 4678 slice_size -= resv; 4679 slice_size -= NEW_START_BLOCK; 4680 vtoc->efi_parts[1].p_start = start_block; 4681 vtoc->efi_parts[1].p_size = slice_size; 4682 vtoc->efi_parts[1].p_tag = V_USR; 4683 (void) strcpy(vtoc->efi_parts[1].p_name, "zfs"); 4684 4685 vtoc->efi_parts[8].p_start = slice_size + start_block; 4686 vtoc->efi_parts[8].p_size = resv; 4687 vtoc->efi_parts[8].p_tag = V_RESERVED; 4688 } 4689 4690 if (efi_write(fd, vtoc) != 0) { 4691 /* 4692 * Some block drivers (like pcata) may not support EFI 4693 * GPT labels. Print out a helpful error message dir- 4694 * ecting the user to manually label the disk and give 4695 * a specific slice. 4696 */ 4697 (void) close(fd); 4698 efi_free(vtoc); 4699 4700 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4701 "try using fdisk(1M) and then provide a specific slice")); 4702 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 4703 } 4704 4705 (void) close(fd); 4706 efi_free(vtoc); 4707 return (0); 4708 } 4709 4710 static boolean_t 4711 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 4712 { 4713 char *type; 4714 nvlist_t **child; 4715 uint_t children, c; 4716 4717 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 4718 if (strcmp(type, VDEV_TYPE_FILE) == 0 || 4719 strcmp(type, VDEV_TYPE_HOLE) == 0 || 4720 strcmp(type, VDEV_TYPE_MISSING) == 0) { 4721 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4722 "vdev type '%s' is not supported"), type); 4723 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 4724 return (B_FALSE); 4725 } 4726 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 4727 &child, &children) == 0) { 4728 for (c = 0; c < children; c++) { 4729 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 4730 return (B_FALSE); 4731 } 4732 } 4733 return (B_TRUE); 4734 } 4735 4736 /* 4737 * Check if this zvol is allowable for use as a dump device; zero if 4738 * it is, > 0 if it isn't, < 0 if it isn't a zvol. 4739 * 4740 * Allowable storage configurations include mirrors, all raidz variants, and 4741 * pools with log, cache, and spare devices. Pools which are backed by files or 4742 * have missing/hole vdevs are not suitable. 
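 *
 * A hypothetical caller (e.g. a dump administration utility) might do:
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/tank/dump") == 0) {
 *		(the zvol's pool layout is acceptable for dumping)
 *	}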
4743 */ 4744 int 4745 zvol_check_dump_config(char *arg) 4746 { 4747 zpool_handle_t *zhp = NULL; 4748 nvlist_t *config, *nvroot; 4749 char *p, *volname; 4750 nvlist_t **top; 4751 uint_t toplevels; 4752 libzfs_handle_t *hdl; 4753 char errbuf[1024]; 4754 char poolname[ZFS_MAX_DATASET_NAME_LEN]; 4755 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4756 int ret = 1; 4757 4758 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4759 return (-1); 4760 } 4761 4762 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4763 "dump is not supported on device '%s'"), arg); 4764 4765 if ((hdl = libzfs_init()) == NULL) 4766 return (1); 4767 libzfs_print_on_error(hdl, B_TRUE); 4768 4769 volname = arg + pathlen; 4770 4771 /* check the configuration of the pool */ 4772 if ((p = strchr(volname, '/')) == NULL) { 4773 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4774 "malformed dataset name")); 4775 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4776 return (1); 4777 } else if (p - volname >= ZFS_MAX_DATASET_NAME_LEN) { 4778 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4779 "dataset name is too long")); 4780 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4781 return (1); 4782 } else { 4783 (void) strncpy(poolname, volname, p - volname); 4784 poolname[p - volname] = '\0'; 4785 } 4786 4787 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4788 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4789 "could not open pool '%s'"), poolname); 4790 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4791 goto out; 4792 } 4793 config = zpool_get_config(zhp, NULL); 4794 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4795 &nvroot) != 0) { 4796 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4797 "could not obtain vdev configuration for '%s'"), poolname); 4798 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4799 goto out; 4800 } 4801 4802 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4803 &top, &toplevels) == 0); 4804 4805 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4806 goto out; 4807 } 4808 ret = 0; 4809 4810 out: 4811 if (zhp) 4812 zpool_close(zhp); 4813 libzfs_fini(hdl); 4814 return (ret); 4815 } 4816
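#ifdef LIBZFS_POOL_EXAMPLE
/*
 * Illustrative only -- not part of libzfs and never compiled (the guard
 * macro above is not defined by the build).  A minimal sketch, assuming an
 * already-open pool handle, of how the error-log and object-path interfaces
 * above fit together, in the spirit of "zpool status -v".
 */
#include <sys/param.h>
#include <stdio.h>
#include <libzfs.h>

static void
example_print_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;

	/* zpool_get_errlog() leaves the list unset when there are no errors */
	if (zpool_get_errlog(zhp, &nverrlist) != 0 || nverrlist == NULL)
		return;

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;
		char pathname[MAXPATHLEN * 2];

		if (nvpair_value_nvlist(elem, &nv) != 0 ||
		    nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET, &dsobj) != 0 ||
		    nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT, &obj) != 0)
			continue;

		/* Translate <dataset obj, object> into a readable path. */
		zpool_obj_to_path(zhp, dsobj, obj, pathname,
		    sizeof (pathname));
		(void) printf("%s\n", pathname);
	}
	nvlist_free(nverrlist);
}
#endif	/* LIBZFS_POOL_EXAMPLE */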