/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	const char		*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;

static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
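
/*
 * Usage sketch (illustrative only; the device path below is hypothetical):
 *
 *	char *devid = get_devid("/dev/dsk/c0t0d0s0");
 *	if (devid != NULL) {
 *		... record the devid string for this path ...
 *		devid_str_free(devid);
 *	}
 *
 * A NULL return simply means the device has no devid or could not be
 * opened, which callers treat as non-fatal.
 */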

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static void
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			fix_paths(child[c], names);
		return;
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk.  If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path.  This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return;

	verify(nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) == 0);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		verify(nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) == 0);
		devid_str_free(devid);
	}
}
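
/*
 * Worked example of the suffix match above (paths are hypothetical): given
 * an original path of "/dev/dsk/c0t0d0s4" and candidate names
 * "/dev/dsk/c1t0d0s4" and "/dev/dsk/c1t0d0s0" for the same guid, the first
 * candidate matches 6 trailing characters ("t0d0s4") while the second
 * matches none ('0' vs. '4'), so the first is chosen and the slice number
 * is preserved.
 */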

/*
 * Add the given configuration to the list of known devices.
 */
static void
add_config(pool_list_t *pl, const char *path, nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return;
	}

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		pe = zfs_malloc(sizeof (pool_entry_t));
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		ve = zfs_malloc(sizeof (vdev_entry_t));
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.
	 * If so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		ce = zfs_malloc(sizeof (config_entry_t));
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	ne = zfs_malloc(sizeof (name_entry_t));

	ne->ne_name = zfs_strdup(path);
	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;
}
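
/*
 * After scanning, the pool_list_t mirrors the hierarchy described at the
 * top of this file.  For example (all guids and txgs illustrative), two
 * labels for the same toplevel vdev at different txgs are gathered as:
 *
 *	pool (pe_guid 0x1)
 *	    toplevel vdev (ve_guid 0x10)
 *		config @ txg 50
 *		config @ txg 49
 *
 * get_configs() below then selects the newest txg for each toplevel vdev.
 */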

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(pool_list_t *pl)
{
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	nvlist_t *ret, *config, *tmp, *nvtop, *nvroot;
	int config_seen;
	uint64_t best_txg;
	char *name;
	zfs_cmd_t zc = { 0 };
	uint64_t guid;
	char *packed;
	size_t len;
	int err;

	verify(nvlist_alloc(&ret, 0, 0) == 0);

	for (pe = pl->pools; pe != NULL; pe = penext) {
		uint_t c;
		uint_t children = 0;
		uint64_t id;
		nvlist_t **child = NULL;

		penext = pe->pe_next;

		verify(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
		config_seen = FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {
				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	pool guid
				 *	name
				 *	pool state
				 */
				uint64_t state;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				verify(nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) == 0);
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				verify(nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) == 0);
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				verify(nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) == 0);

				config_seen = TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);
			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_malloc((id + 1) *
				    sizeof (nvlist_t *));

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			verify(nvlist_dup(nvtop, &child[id], 0) == 0);

			/*
			 * Go through and free all config information.
			 */
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;

				nvlist_free(ce->ce_config);
				free(ce);
			}

			/*
			 * Free this vdev entry, since it has now been merged
			 * into the main config.
			 */
			free(ve);
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We
		 * cannot simply compress the child array, because the kernel
		 * performs certain checks to make sure the vdev IDs match
		 * their location in the configuration.
		 */
		for (c = 0; c < children; c++)
			if (child[c] == NULL) {
				nvlist_t *missing;
				verify(nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) == 0);
				verify(nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE, VDEV_TYPE_MISSING) == 0);
				verify(nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) == 0);
				verify(nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) == 0);
				child[c] = missing;
			}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) == 0);
		verify(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
		verify(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) == 0);
		verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) == 0);

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		fix_paths(nvroot, pl->names);

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		verify(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) == 0);
		nvlist_free(nvroot);
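
		/*
		 * At this point 'config' is fully assembled for this pool
		 * (field values illustrative):
		 *
		 *	pool_guid:	0x1
		 *	name:		"tank"
		 *	state:		exported
		 *	vdev_tree:	root vdev with one child per toplevel
		 *			vdev, in ID order
		 */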

		/*
		 * Free this pool entry.
		 */
		free(pe);

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
		if (zfs_ioctl(ZFS_IOC_POOL_GUID, &zc) == 0 &&
		    guid == zc.zc_guid) {
			nvlist_free(config);
			continue;
		}

		/*
		 * Try to do the import in order to get vdev state.
		 */
		if ((err = nvlist_size(config, &len, NV_ENCODE_NATIVE)) != 0)
			zfs_baderror(err);

		packed = zfs_malloc(len);

		if ((err = nvlist_pack(config, &packed, &len,
		    NV_ENCODE_NATIVE, 0)) != 0)
			zfs_baderror(err);

		nvlist_free(config);
		config = NULL;

		zc.zc_config_src_size = len;
		zc.zc_config_src = (uint64_t)(uintptr_t)packed;

		zc.zc_config_dst_size = 2 * len;
		zc.zc_config_dst = (uint64_t)(uintptr_t)
		    zfs_malloc(zc.zc_config_dst_size);

		while ((err = zfs_ioctl(ZFS_IOC_POOL_TRYIMPORT,
		    &zc)) != 0 && errno == ENOMEM) {
			free((void *)(uintptr_t)zc.zc_config_dst);
			zc.zc_config_dst = (uint64_t)(uintptr_t)
			    zfs_malloc(zc.zc_config_dst_size);
		}

		free(packed);

		if (err)
			zfs_baderror(errno);

		verify(nvlist_unpack((void *)(uintptr_t)zc.zc_config_dst,
		    zc.zc_config_dst_size, &config, 0) == 0);

		set_pool_health(config);

		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_add_nvlist(ret, name, config) == 0);

		nvlist_free(config);

		free((void *)(uintptr_t)zc.zc_config_dst);
	}

	return (ret);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(size_t size, int l)
{
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
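
/*
 * Worked example: with VDEV_LABELS == 4 and S == sizeof (vdev_label_t),
 * labels 0 and 1 sit at the front of the device at offsets 0 and S, while
 * labels 2 and 3 sit at the back at offsets (size - 2S) and (size - S).
 * For l == 3 the expression above gives 3 * S + (size - 4 * S) == size - S.
 */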

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
nvlist_t *
zpool_read_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	nvlist_t *config;
	uint64_t state, txg;

	if (fstat64(fd, &statbuf) == -1)
		return (NULL);

	label = zfs_malloc(sizeof (vdev_label_t));

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread(fd, label, sizeof (vdev_label_t),
		    label_offset(statbuf.st_size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), &config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_DESTROYED) {
			nvlist_free(config);
			continue;
		}

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0) {
			nvlist_free(config);
			continue;
		}

		free(label);
		return (config);
	}

	free(label);
	return (NULL);
}

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 */
nvlist_t *
zpool_find_import(int argc, char **argv)
{
	int i;
	DIR *dirp;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	struct stat64 statbuf;
	nvlist_t *ret, *config;
	static char *default_dir = "/dev/dsk";
	int fd;
	pool_list_t pools = { 0 };

	if (argc == 0) {
		argc = 1;
		argv = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < argc; i++) {
		if (argv[i][0] != '/') {
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot open '%s': must be an absolute path"),
			    argv[i]);
			return (NULL);
		}

		if ((dirp = opendir(argv[i])) == NULL) {
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot open '%s': %s"), argv[i],
			    strerror(errno));
			return (NULL);
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {

			(void) snprintf(path, sizeof (path), "%s/%s",
			    argv[i], dp->d_name);

			if (stat64(path, &statbuf) != 0)
				continue;

			/*
			 * Ignore directories (which includes "." and "..").
			 */
			if (S_ISDIR(statbuf.st_mode))
				continue;

			if ((fd = open64(path, O_RDONLY)) < 0)
				continue;

			config = zpool_read_label(fd);

			(void) close(fd);

			if (config != NULL)
				add_config(&pools, path, config);
		}

		(void) closedir(dirp);
	}

	ret = get_configs(&pools);

	return (ret);
}
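
/*
 * Usage sketch (illustrative): search the default directory and walk the
 * resulting name -> config mappings.
 *
 *	nvlist_t *pools = zpool_find_import(0, NULL);
 *	nvpair_t *elem = NULL;
 *
 *	while (pools != NULL &&
 *	    (elem = nvlist_next_nvpair(pools, elem)) != NULL)
 *		... nvpair_name(elem) is an importable pool name ...
 */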

/*
 * Returns TRUE if the given guid appears anywhere in the given vdev tree.
 */
int
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (TRUE);
	}

	return (FALSE);
}

/*
 * Determines if the pool is in use.  If so, it returns TRUE and the state of
 * the pool as well as the name of the pool.  The name string is allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(int fd, pool_state_t *state, char **namestr)
{
	nvlist_t *config;
	char *name;
	int ret;
	zfs_cmd_t zc = { 0 };
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval;

	if ((config = zpool_read_label(fd)) == NULL)
		return (FALSE);

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &name) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &guid) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		ret = TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of an active pool (in which case the pool will exist and the
		 * guid will be the same), or whether it's part of an active
		 * pool that was disconnected without being explicitly
		 * exported.
		 *
		 * We use the direct ioctl() first to avoid triggering an error
		 * message if the pool cannot be opened.
		 */
		(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
		if (zfs_ioctl(ZFS_IOC_POOL_GUID, &zc) == 0 &&
		    guid == zc.zc_guid) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = FALSE;
			}
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = TRUE;
		}
		break;

	default:
		ret = FALSE;
	}

	if (ret) {
		*namestr = zfs_strdup(name);
		*state = (pool_state_t)stateval;
	}

	nvlist_free(config);
	return (ret);
}
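
/*
 * Usage sketch (illustrative): 'fd' is an open descriptor for a candidate
 * device.
 *
 *	pool_state_t state;
 *	char *name;
 *
 *	if (zpool_in_use(fd, &state, &name)) {
 *		... refuse to overwrite; device belongs to pool 'name' ...
 *		free(name);
 *	}
 */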