1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * Pool import support functions. 30 * 31 * To import a pool, we rely on reading the configuration information from the 32 * ZFS label of each device. If we successfully read the label, then we 33 * organize the configuration information in the following hierarchy: 34 * 35 * pool guid -> toplevel vdev guid -> label txg 36 * 37 * Duplicate entries matching this same tuple will be discarded. Once we have 38 * examined every device, we pick the best label txg config for each toplevel 39 * vdev. We then arrange these toplevel vdevs into a complete pool config, and 40 * update any paths that have changed. Finally, we attempt to import the pool 41 * using our derived config, and record the results. 
42 */ 43 44 #include <devid.h> 45 #include <dirent.h> 46 #include <errno.h> 47 #include <libintl.h> 48 #include <stdlib.h> 49 #include <string.h> 50 #include <sys/stat.h> 51 #include <unistd.h> 52 #include <fcntl.h> 53 54 #include <sys/vdev_impl.h> 55 56 #include "libzfs.h" 57 #include "libzfs_impl.h" 58 59 /* 60 * Intermediate structures used to gather configuration information. 61 */ 62 typedef struct config_entry { 63 uint64_t ce_txg; 64 nvlist_t *ce_config; 65 struct config_entry *ce_next; 66 } config_entry_t; 67 68 typedef struct vdev_entry { 69 uint64_t ve_guid; 70 config_entry_t *ve_configs; 71 struct vdev_entry *ve_next; 72 } vdev_entry_t; 73 74 typedef struct pool_entry { 75 uint64_t pe_guid; 76 vdev_entry_t *pe_vdevs; 77 struct pool_entry *pe_next; 78 } pool_entry_t; 79 80 typedef struct name_entry { 81 char *ne_name; 82 uint64_t ne_guid; 83 struct name_entry *ne_next; 84 } name_entry_t; 85 86 typedef struct pool_list { 87 pool_entry_t *pools; 88 name_entry_t *names; 89 } pool_list_t; 90 91 static char * 92 get_devid(const char *path) 93 { 94 int fd; 95 ddi_devid_t devid; 96 char *minor, *ret; 97 98 if ((fd = open(path, O_RDONLY)) < 0) 99 return (NULL); 100 101 minor = NULL; 102 ret = NULL; 103 if (devid_get(fd, &devid) == 0) { 104 if (devid_get_minor_name(fd, &minor) == 0) 105 ret = devid_str_encode(devid, minor); 106 if (minor != NULL) 107 devid_str_free(minor); 108 devid_free(devid); 109 } 110 (void) close(fd); 111 112 return (ret); 113 } 114 115 116 /* 117 * Go through and fix up any path and/or devid information for the given vdev 118 * configuration. 
119 */ 120 static int 121 fix_paths(nvlist_t *nv, name_entry_t *names) 122 { 123 nvlist_t **child; 124 uint_t c, children; 125 uint64_t guid; 126 name_entry_t *ne, *best; 127 char *path, *devid; 128 int matched; 129 130 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 131 &child, &children) == 0) { 132 for (c = 0; c < children; c++) 133 if (fix_paths(child[c], names) != 0) 134 return (-1); 135 return (0); 136 } 137 138 /* 139 * This is a leaf (file or disk) vdev. In either case, go through 140 * the name list and see if we find a matching guid. If so, replace 141 * the path and see if we can calculate a new devid. 142 * 143 * There may be multiple names associated with a particular guid, in 144 * which case we have overlapping slices or multiple paths to the same 145 * disk. If this is the case, then we want to pick the path that is 146 * the most similar to the original, where "most similar" is the number 147 * of matching characters starting from the end of the path. This will 148 * preserve slice numbers even if the disks have been reorganized, and 149 * will also catch preferred disk names if multiple paths exist. 150 */ 151 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0); 152 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0) 153 path = NULL; 154 155 matched = 0; 156 best = NULL; 157 for (ne = names; ne != NULL; ne = ne->ne_next) { 158 if (ne->ne_guid == guid) { 159 const char *src, *dst; 160 int count; 161 162 if (path == NULL) { 163 best = ne; 164 break; 165 } 166 167 src = ne->ne_name + strlen(ne->ne_name) - 1; 168 dst = path + strlen(path) - 1; 169 for (count = 0; src >= ne->ne_name && dst >= path; 170 src--, dst--, count++) 171 if (*src != *dst) 172 break; 173 174 /* 175 * At this point, 'count' is the number of characters 176 * matched from the end. 
177 */ 178 if (count > matched || best == NULL) { 179 best = ne; 180 matched = count; 181 } 182 } 183 } 184 185 if (best == NULL) 186 return (0); 187 188 if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0) 189 return (-1); 190 191 if ((devid = get_devid(best->ne_name)) == NULL) { 192 (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID); 193 } else { 194 if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) 195 return (-1); 196 devid_str_free(devid); 197 } 198 199 return (0); 200 } 201 202 /* 203 * Add the given configuration to the list of known devices. 204 */ 205 static int 206 add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path, 207 nvlist_t *config) 208 { 209 uint64_t pool_guid, vdev_guid, top_guid, txg, state; 210 pool_entry_t *pe; 211 vdev_entry_t *ve; 212 config_entry_t *ce; 213 name_entry_t *ne; 214 215 /* 216 * If this is a hot spare not currently in use or level 2 cache 217 * device, add it to the list of names to translate, but don't do 218 * anything else. 219 */ 220 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 221 &state) == 0 && 222 (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) && 223 nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) { 224 if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL) 225 return (-1); 226 227 if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) { 228 free(ne); 229 return (-1); 230 } 231 ne->ne_guid = vdev_guid; 232 ne->ne_next = pl->names; 233 pl->names = ne; 234 return (0); 235 } 236 237 /* 238 * If we have a valid config but cannot read any of these fields, then 239 * it means we have a half-initialized label. In vdev_label_init() 240 * we write a label with txg == 0 so that we can identify the device 241 * in case the user refers to the same disk later on. If we fail to 242 * create the pool, we'll be left with a label in this state 243 * which should not be considered part of a valid pool. 
244 */ 245 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 246 &pool_guid) != 0 || 247 nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, 248 &vdev_guid) != 0 || 249 nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID, 250 &top_guid) != 0 || 251 nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 252 &txg) != 0 || txg == 0) { 253 nvlist_free(config); 254 return (0); 255 } 256 257 /* 258 * First, see if we know about this pool. If not, then add it to the 259 * list of known pools. 260 */ 261 for (pe = pl->pools; pe != NULL; pe = pe->pe_next) { 262 if (pe->pe_guid == pool_guid) 263 break; 264 } 265 266 if (pe == NULL) { 267 if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) { 268 nvlist_free(config); 269 return (-1); 270 } 271 pe->pe_guid = pool_guid; 272 pe->pe_next = pl->pools; 273 pl->pools = pe; 274 } 275 276 /* 277 * Second, see if we know about this toplevel vdev. Add it if its 278 * missing. 279 */ 280 for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) { 281 if (ve->ve_guid == top_guid) 282 break; 283 } 284 285 if (ve == NULL) { 286 if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) { 287 nvlist_free(config); 288 return (-1); 289 } 290 ve->ve_guid = top_guid; 291 ve->ve_next = pe->pe_vdevs; 292 pe->pe_vdevs = ve; 293 } 294 295 /* 296 * Third, see if we have a config with a matching transaction group. If 297 * so, then we do nothing. Otherwise, add it to the list of known 298 * configs. 299 */ 300 for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) { 301 if (ce->ce_txg == txg) 302 break; 303 } 304 305 if (ce == NULL) { 306 if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) { 307 nvlist_free(config); 308 return (-1); 309 } 310 ce->ce_txg = txg; 311 ce->ce_config = config; 312 ce->ce_next = ve->ve_configs; 313 ve->ve_configs = ce; 314 } else { 315 nvlist_free(config); 316 } 317 318 /* 319 * At this point we've successfully added our config to the list of 320 * known configs. 
The last thing to do is add the vdev guid -> path 321 * mappings so that we can fix up the configuration as necessary before 322 * doing the import. 323 */ 324 if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL) 325 return (-1); 326 327 if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) { 328 free(ne); 329 return (-1); 330 } 331 332 ne->ne_guid = vdev_guid; 333 ne->ne_next = pl->names; 334 pl->names = ne; 335 336 return (0); 337 } 338 339 /* 340 * Returns true if the named pool matches the given GUID. 341 */ 342 static int 343 pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid, 344 boolean_t *isactive) 345 { 346 zpool_handle_t *zhp; 347 uint64_t theguid; 348 349 if (zpool_open_silent(hdl, name, &zhp) != 0) 350 return (-1); 351 352 if (zhp == NULL) { 353 *isactive = B_FALSE; 354 return (0); 355 } 356 357 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID, 358 &theguid) == 0); 359 360 zpool_close(zhp); 361 362 *isactive = (theguid == guid); 363 return (0); 364 } 365 366 static nvlist_t * 367 refresh_config(libzfs_handle_t *hdl, nvlist_t *config) 368 { 369 nvlist_t *nvl; 370 zfs_cmd_t zc = { 0 }; 371 int err; 372 373 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) 374 return (NULL); 375 376 if (zcmd_alloc_dst_nvlist(hdl, &zc, 377 zc.zc_nvlist_conf_size * 2) != 0) { 378 zcmd_free_nvlists(&zc); 379 return (NULL); 380 } 381 382 while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT, 383 &zc)) != 0 && errno == ENOMEM) { 384 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 385 zcmd_free_nvlists(&zc); 386 return (NULL); 387 } 388 } 389 390 if (err) { 391 (void) zpool_standard_error(hdl, errno, 392 dgettext(TEXT_DOMAIN, "cannot discover pools")); 393 zcmd_free_nvlists(&zc); 394 return (NULL); 395 } 396 397 if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) { 398 zcmd_free_nvlists(&zc); 399 return (NULL); 400 } 401 402 zcmd_free_nvlists(&zc); 403 return (nvl); 404 } 405 406 /* 407 * Convert our list of pools into the definitive set of 
 * configurations.  We start by picking the best config for each toplevel
 * vdev.  Once that's done, we assemble the toplevel vdevs into a full config
 * for the pool.  We make a pass to fix up any incorrect paths, and then add
 * it to the main list to return to the user.
 *
 * Returns an nvlist mapping pool name -> config (caller frees), or NULL on
 * error with the error recorded on 'hdl'.  If 'active_ok' is set, configs
 * are returned even for pools that are currently imported, and without
 * being refreshed through the kernel.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname;
	uint64_t version, guid;
	uint_t children = 0;		/* size of the 'child' array below */
	nvlist_t **child = NULL;	/* toplevel vdevs indexed by vdev id */
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool
		 * configuration from the first one we find, and then go
		 * through the rest and add them as necessary to the 'vdevs'
		 * member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.  (ve_configs is never empty: add_config()
			 * only creates a vdev_entry_t when storing a config,
			 * and rejects txg == 0, so 'tmp' is always set.)
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION, &version) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
					goto nomem;
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				if (nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) != 0)
					goto nomem;
				/*
				 * hostid 0 means "not present"; hostname is
				 * only expected (and later re-added below)
				 * when hostid is set.
				 */
				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					verify(nvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME,
					    &hostname) == 0);
					if (nvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid) != 0)
						goto nomem;
					if (nvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME,
					    hostname) != 0)
						goto nomem;
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array, growing
			 * the array if the vdev id is beyond its current end.
			 * zfs_alloc() zeroes, so unseen slots stay NULL and
			 * are detected as missing vdevs below.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);
			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We
		 * cannot simply compress the child array, because the kernel
		 * performs certain checks to make sure the vdev IDs match
		 * their location in the configuration.
		 */
		for (c = 0; c < children; c++)
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/* nvlist_add_nvlist_array copied the children; free ours. */
		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case
		 * we can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		/* Replace our assembled config with the kernel-vetted one. */
		if ((nvl = refresh_config(hdl, config)) == NULL)
			goto error;

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we
		 * have them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual
		 * label (the tryimport ioctl may have substituted the
		 * current host's identity).
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		nvlist_free(config);
		config = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.  Labels 0 and 1 live at the front
 * of the device, labels 2 and 3 at the aligned end.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	/* An unstat-able device simply has no label; not an error. */
	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	/* Try each of the four labels; the first valid one wins. */
	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		/* Reject labels with no or unknown pool state. */
		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		/*
		 * A txg of 0 marks a half-initialized label (see
		 * add_config()); spares and l2cache devices legitimately
		 * have no txg, so they are exempt from this check.
		 */
		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	/* No valid label found; report success with no config. */
	free(label);
	*config = NULL;
	return (0);
}

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 *
 * Returns the nvlist produced by get_configs() (caller frees), or NULL on
 * error.
 */
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv,
    boolean_t active_ok)
{
	int i;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	static char *default_dir = "/dev/dsk";
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;


	if (argc == 0) {
		argc = 1;
		argv = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < argc; i++) {
		/* Only absolute paths are accepted. */
		if (argv[i][0] != '/') {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    argv[i]);
			goto error;
		}

		if ((dirp = opendir(argv[i])) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    argv[i]);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {

			(void) snprintf(path, sizeof (path), "%s/%s",
			    argv[i], dp->d_name);

			if (stat64(path, &statbuf) != 0)
				continue;

			/*
			 * Ignore directories (which includes "." and "..").
			 */
			if (S_ISDIR(statbuf.st_mode))
				continue;

			/*
			 * Ignore special (non-character or non-block) files.
			 * NOTE(review): the code actually admits regular
			 * files and block devices (S_ISREG || S_ISBLK), not
			 * character devices — the comment's wording looks
			 * stale; verify against the original intent.
			 */
			if (!S_ISREG(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode))
				continue;

			if ((fd = open64(path, O_RDONLY)) < 0)
				continue;

			/* zpool_read_label() only fails on out-of-memory. */
			if ((zpool_read_label(fd, &config)) != 0) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			if (config != NULL)
				if (add_config(hdl, &pools, path, config) != 0)
					goto error;
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

	ret = get_configs(hdl, &pools, active_ok);

error:
	/* Tear down the intermediate pool/vdev/config/name lists. */
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}

/*
 * Given a cache file, return the contents as a list of importable pools.
900 */ 901 nvlist_t * 902 zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile, 903 boolean_t active_ok) 904 { 905 char *buf; 906 int fd; 907 struct stat64 statbuf; 908 nvlist_t *raw, *src, *dst; 909 nvlist_t *pools; 910 nvpair_t *elem; 911 char *name; 912 uint64_t guid; 913 boolean_t active; 914 915 if ((fd = open(cachefile, O_RDONLY)) < 0) { 916 zfs_error_aux(hdl, "%s", strerror(errno)); 917 (void) zfs_error(hdl, EZFS_BADCACHE, 918 dgettext(TEXT_DOMAIN, "failed to open cache file")); 919 return (NULL); 920 } 921 922 if (fstat64(fd, &statbuf) != 0) { 923 zfs_error_aux(hdl, "%s", strerror(errno)); 924 (void) close(fd); 925 (void) zfs_error(hdl, EZFS_BADCACHE, 926 dgettext(TEXT_DOMAIN, "failed to get size of cache file")); 927 return (NULL); 928 } 929 930 if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) { 931 (void) close(fd); 932 return (NULL); 933 } 934 935 if (read(fd, buf, statbuf.st_size) != statbuf.st_size) { 936 (void) close(fd); 937 free(buf); 938 (void) zfs_error(hdl, EZFS_BADCACHE, 939 dgettext(TEXT_DOMAIN, 940 "failed to read cache file contents")); 941 return (NULL); 942 } 943 944 (void) close(fd); 945 946 if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) { 947 free(buf); 948 (void) zfs_error(hdl, EZFS_BADCACHE, 949 dgettext(TEXT_DOMAIN, 950 "invalid or corrupt cache file contents")); 951 return (NULL); 952 } 953 954 free(buf); 955 956 /* 957 * Go through and get the current state of the pools and refresh their 958 * state. 
959 */ 960 if (nvlist_alloc(&pools, 0, 0) != 0) { 961 (void) no_memory(hdl); 962 nvlist_free(raw); 963 return (NULL); 964 } 965 966 elem = NULL; 967 while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) { 968 verify(nvpair_value_nvlist(elem, &src) == 0); 969 970 verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME, 971 &name) == 0); 972 verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID, 973 &guid) == 0); 974 975 if (!active_ok) { 976 if (pool_active(hdl, name, guid, &active) != 0) { 977 nvlist_free(raw); 978 nvlist_free(pools); 979 return (NULL); 980 } 981 982 if (active) 983 continue; 984 985 if ((dst = refresh_config(hdl, src)) == NULL) { 986 nvlist_free(raw); 987 nvlist_free(pools); 988 return (NULL); 989 } 990 991 if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) 992 != 0) { 993 (void) no_memory(hdl); 994 nvlist_free(dst); 995 nvlist_free(raw); 996 nvlist_free(pools); 997 return (NULL); 998 } 999 nvlist_free(dst); 1000 } else { 1001 if (nvlist_add_nvlist(pools, nvpair_name(elem), src) 1002 != 0) { 1003 (void) no_memory(hdl); 1004 nvlist_free(raw); 1005 nvlist_free(pools); 1006 return (NULL); 1007 } 1008 } 1009 } 1010 1011 nvlist_free(raw); 1012 return (pools); 1013 } 1014 1015 1016 boolean_t 1017 find_guid(nvlist_t *nv, uint64_t guid) 1018 { 1019 uint64_t tmp; 1020 nvlist_t **child; 1021 uint_t c, children; 1022 1023 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0); 1024 if (tmp == guid) 1025 return (B_TRUE); 1026 1027 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1028 &child, &children) == 0) { 1029 for (c = 0; c < children; c++) 1030 if (find_guid(child[c], guid)) 1031 return (B_TRUE); 1032 } 1033 1034 return (B_FALSE); 1035 } 1036 1037 typedef struct aux_cbdata { 1038 const char *cb_type; 1039 uint64_t cb_guid; 1040 zpool_handle_t *cb_zhp; 1041 } aux_cbdata_t; 1042 1043 static int 1044 find_aux(zpool_handle_t *zhp, void *data) 1045 { 1046 aux_cbdata_t *cbp = data; 1047 nvlist_t **list; 1048 uint_t i, count; 1049 
uint64_t guid; 1050 nvlist_t *nvroot; 1051 1052 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 1053 &nvroot) == 0); 1054 1055 if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type, 1056 &list, &count) == 0) { 1057 for (i = 0; i < count; i++) { 1058 verify(nvlist_lookup_uint64(list[i], 1059 ZPOOL_CONFIG_GUID, &guid) == 0); 1060 if (guid == cbp->cb_guid) { 1061 cbp->cb_zhp = zhp; 1062 return (1); 1063 } 1064 } 1065 } 1066 1067 zpool_close(zhp); 1068 return (0); 1069 } 1070 1071 /* 1072 * Determines if the pool is in use. If so, it returns true and the state of 1073 * the pool as well as the name of the pool. Both strings are allocated and 1074 * must be freed by the caller. 1075 */ 1076 int 1077 zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr, 1078 boolean_t *inuse) 1079 { 1080 nvlist_t *config; 1081 char *name; 1082 boolean_t ret; 1083 uint64_t guid, vdev_guid; 1084 zpool_handle_t *zhp; 1085 nvlist_t *pool_config; 1086 uint64_t stateval, isspare; 1087 aux_cbdata_t cb = { 0 }; 1088 boolean_t isactive; 1089 1090 *inuse = B_FALSE; 1091 1092 if (zpool_read_label(fd, &config) != 0) { 1093 (void) no_memory(hdl); 1094 return (-1); 1095 } 1096 1097 if (config == NULL) 1098 return (0); 1099 1100 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 1101 &stateval) == 0); 1102 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, 1103 &vdev_guid) == 0); 1104 1105 if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) { 1106 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1107 &name) == 0); 1108 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1109 &guid) == 0); 1110 } 1111 1112 switch (stateval) { 1113 case POOL_STATE_EXPORTED: 1114 ret = B_TRUE; 1115 break; 1116 1117 case POOL_STATE_ACTIVE: 1118 /* 1119 * For an active pool, we have to determine if it's really part 1120 * of a currently active pool (in which case the pool will exist 1121 * and the guid will be the same), or 
whether it's part of an 1122 * active pool that was disconnected without being explicitly 1123 * exported. 1124 */ 1125 if (pool_active(hdl, name, guid, &isactive) != 0) { 1126 nvlist_free(config); 1127 return (-1); 1128 } 1129 1130 if (isactive) { 1131 /* 1132 * Because the device may have been removed while 1133 * offlined, we only report it as active if the vdev is 1134 * still present in the config. Otherwise, pretend like 1135 * it's not in use. 1136 */ 1137 if ((zhp = zpool_open_canfail(hdl, name)) != NULL && 1138 (pool_config = zpool_get_config(zhp, NULL)) 1139 != NULL) { 1140 nvlist_t *nvroot; 1141 1142 verify(nvlist_lookup_nvlist(pool_config, 1143 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1144 ret = find_guid(nvroot, vdev_guid); 1145 } else { 1146 ret = B_FALSE; 1147 } 1148 1149 /* 1150 * If this is an active spare within another pool, we 1151 * treat it like an unused hot spare. This allows the 1152 * user to create a pool with a hot spare that currently 1153 * in use within another pool. Since we return B_TRUE, 1154 * libdiskmgt will continue to prevent generic consumers 1155 * from using the device. 1156 */ 1157 if (ret && nvlist_lookup_uint64(config, 1158 ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare) 1159 stateval = POOL_STATE_SPARE; 1160 1161 if (zhp != NULL) 1162 zpool_close(zhp); 1163 } else { 1164 stateval = POOL_STATE_POTENTIALLY_ACTIVE; 1165 ret = B_TRUE; 1166 } 1167 break; 1168 1169 case POOL_STATE_SPARE: 1170 /* 1171 * For a hot spare, it can be either definitively in use, or 1172 * potentially active. To determine if it's in use, we iterate 1173 * over all pools in the system and search for one with a spare 1174 * with a matching guid. 1175 * 1176 * Due to the shared nature of spares, we don't actually report 1177 * the potentially active case as in use. 
This means the user 1178 * can freely create pools on the hot spares of exported pools, 1179 * but to do otherwise makes the resulting code complicated, and 1180 * we end up having to deal with this case anyway. 1181 */ 1182 cb.cb_zhp = NULL; 1183 cb.cb_guid = vdev_guid; 1184 cb.cb_type = ZPOOL_CONFIG_SPARES; 1185 if (zpool_iter(hdl, find_aux, &cb) == 1) { 1186 name = (char *)zpool_get_name(cb.cb_zhp); 1187 ret = TRUE; 1188 } else { 1189 ret = FALSE; 1190 } 1191 break; 1192 1193 case POOL_STATE_L2CACHE: 1194 1195 /* 1196 * Check if any pool is currently using this l2cache device. 1197 */ 1198 cb.cb_zhp = NULL; 1199 cb.cb_guid = vdev_guid; 1200 cb.cb_type = ZPOOL_CONFIG_L2CACHE; 1201 if (zpool_iter(hdl, find_aux, &cb) == 1) { 1202 name = (char *)zpool_get_name(cb.cb_zhp); 1203 ret = TRUE; 1204 } else { 1205 ret = FALSE; 1206 } 1207 break; 1208 1209 default: 1210 ret = B_FALSE; 1211 } 1212 1213 1214 if (ret) { 1215 if ((*namestr = zfs_strdup(hdl, name)) == NULL) { 1216 if (cb.cb_zhp) 1217 zpool_close(cb.cb_zhp); 1218 nvlist_free(config); 1219 return (-1); 1220 } 1221 *state = (pool_state_t)stateval; 1222 } 1223 1224 if (cb.cb_zhp) 1225 zpool_close(cb.cb_zhp); 1226 1227 nvlist_free(config); 1228 *inuse = ret; 1229 return (0); 1230 } 1231